Sep 30 16:59:12 crc systemd[1]: Starting Kubernetes Kubelet...
Sep 30 16:59:12 crc restorecon[4678]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 30 16:59:12 crc restorecon[4678]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc 
restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 30 16:59:12 crc 
restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 
16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 30 16:59:12 crc 
restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 
16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:12 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 30 16:59:13 crc restorecon[4678]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Sep 30 16:59:13 crc kubenswrapper[4818]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Sep 30 16:59:13 crc kubenswrapper[4818]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Sep 30 16:59:13 crc kubenswrapper[4818]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Sep 30 16:59:13 crc kubenswrapper[4818]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Sep 30 16:59:13 crc kubenswrapper[4818]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Sep 30 16:59:13 crc kubenswrapper[4818]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.744917    4818 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751070    4818 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751100    4818 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751109    4818 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751118    4818 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751128    4818 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751136    4818 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751144    4818 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751154    4818 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751163    4818 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751173    4818 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751182    4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751197    4818 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751205    4818 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751213    4818 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751221    4818 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751229    4818 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751237    4818 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751244    4818 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751252    4818 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751263    4818 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751274 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751283 4818 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751291 4818 feature_gate.go:330] unrecognized feature gate: NewOLM Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751298 4818 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751306 4818 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751314 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751321 4818 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751329 4818 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751337 4818 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751348 4818 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751359 4818 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751370 4818 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751380 4818 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751389 4818 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751397 4818 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751406 4818 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751414 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751423 4818 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751430 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751439 4818 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751447 4818 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751456 4818 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751464 4818 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751471 4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751479 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfig Sep 30 16:59:13 crc 
kubenswrapper[4818]: W0930 16:59:13.751487 4818 feature_gate.go:330] unrecognized feature gate: Example Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751495 4818 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751502 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751510 4818 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751517 4818 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751524 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751532 4818 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751539 4818 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751547 4818 feature_gate.go:330] unrecognized feature gate: PinnedImages Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751554 4818 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751562 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751569 4818 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751577 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751584 4818 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751592 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751599 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751609 4818 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751617 4818 feature_gate.go:330] unrecognized feature gate: GatewayAPI Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751624 4818 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751632 4818 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751639 4818 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751647 4818 feature_gate.go:330] unrecognized feature gate: PlatformOperators Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751655 4818 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751663 4818 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751673 4818 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
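[The long run of feature_gate.go:330 warnings above (it continues below) is one check firing per unknown key: OpenShift hands its full cluster-level gate list to a component that only registers the upstream Kubernetes gates, so unknown names are logged and skipped rather than treated as fatal. A minimal sketch of that pattern, with illustrative names, not the kubelet's actual source:

package main

import "log"

// knownGates mimics a component's registry of gates it actually implements.
var knownGates = map[string]bool{
	"CloudDualStackNodeIPs":                  true,
	"DisableKubeletCloudCredentialProviders": true,
	"KMSv1":                                  true,
}

// applyGates walks the configured gates, warning on unknown keys instead of
// failing hard, so cluster-wide gate lists stay forward-compatible.
func applyGates(configured map[string]bool) map[string]bool {
	effective := map[string]bool{}
	for name, enabled := range configured {
		if !knownGates[name] {
			log.Printf("unrecognized feature gate: %s", name)
			continue
		}
		effective[name] = enabled
	}
	return effective
}

func main() {
	gates := applyGates(map[string]bool{
		"CloudDualStackNodeIPs": true,
		"GatewayAPI":            true, // cluster-level gate, unknown to this component
	})
	log.Printf("feature gates: %v", gates)
}
]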
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.751683 4818 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751818 4818 flags.go:64] FLAG: --address="0.0.0.0"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751835 4818 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751850 4818 flags.go:64] FLAG: --anonymous-auth="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751861 4818 flags.go:64] FLAG: --application-metrics-count-limit="100"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751872 4818 flags.go:64] FLAG: --authentication-token-webhook="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751882 4818 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751894 4818 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751905 4818 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751914 4818 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751950 4818 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751960 4818 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751971 4818 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751980 4818 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751988 4818 flags.go:64] FLAG: --cgroup-root=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.751998 4818 flags.go:64] FLAG: --cgroups-per-qos="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752007 4818 flags.go:64] FLAG: --client-ca-file=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752015 4818 flags.go:64] FLAG: --cloud-config=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752024 4818 flags.go:64] FLAG: --cloud-provider=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752033 4818 flags.go:64] FLAG: --cluster-dns="[]"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752043 4818 flags.go:64] FLAG: --cluster-domain=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752051 4818 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752061 4818 flags.go:64] FLAG: --config-dir=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752070 4818 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752079 4818 flags.go:64] FLAG: --container-log-max-files="5"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752090 4818 flags.go:64] FLAG: --container-log-max-size="10Mi"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752099 4818 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752108 4818 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752118 4818 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752126 4818 flags.go:64] FLAG: --contention-profiling="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752135 4818 flags.go:64] FLAG: --cpu-cfs-quota="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752144 4818 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752153 4818 flags.go:64] FLAG: --cpu-manager-policy="none"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752164 4818 flags.go:64] FLAG: --cpu-manager-policy-options=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752174 4818 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752185 4818 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752194 4818 flags.go:64] FLAG: --enable-debugging-handlers="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752203 4818 flags.go:64] FLAG: --enable-load-reader="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752212 4818 flags.go:64] FLAG: --enable-server="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752221 4818 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752233 4818 flags.go:64] FLAG: --event-burst="100"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752242 4818 flags.go:64] FLAG: --event-qps="50"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752251 4818 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752260 4818 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752270 4818 flags.go:64] FLAG: --eviction-hard=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752280 4818 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752289 4818 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752297 4818 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752307 4818 flags.go:64] FLAG: --eviction-soft=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752317 4818 flags.go:64] FLAG: --eviction-soft-grace-period=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752325 4818 flags.go:64] FLAG: --exit-on-lock-contention="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752334 4818 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752343 4818 flags.go:64] FLAG: --experimental-mounter-path=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752351 4818 flags.go:64] FLAG: --fail-cgroupv1="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752360 4818 flags.go:64] FLAG: --fail-swap-on="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752369 4818 flags.go:64] FLAG: --feature-gates=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752380 4818 flags.go:64] FLAG: --file-check-frequency="20s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752389 4818 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752399 4818 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752410 4818 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752420 4818 flags.go:64] FLAG: --healthz-port="10248"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752429 4818 flags.go:64] FLAG: --help="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752438 4818 flags.go:64] FLAG: --hostname-override=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752446 4818 flags.go:64] FLAG: --housekeeping-interval="10s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752455 4818 flags.go:64] FLAG: --http-check-frequency="20s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752464 4818 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752473 4818 flags.go:64] FLAG: --image-credential-provider-config=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752482 4818 flags.go:64] FLAG: --image-gc-high-threshold="85"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752491 4818 flags.go:64] FLAG: --image-gc-low-threshold="80"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752500 4818 flags.go:64] FLAG: --image-service-endpoint=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752509 4818 flags.go:64] FLAG: --kernel-memcg-notification="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752518 4818 flags.go:64] FLAG: --kube-api-burst="100"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752527 4818 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752536 4818 flags.go:64] FLAG: --kube-api-qps="50"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752544 4818 flags.go:64] FLAG: --kube-reserved=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752555 4818 flags.go:64] FLAG: --kube-reserved-cgroup=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752564 4818 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752573 4818 flags.go:64] FLAG: --kubelet-cgroups=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752581 4818 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752590 4818 flags.go:64] FLAG: --lock-file=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752599 4818 flags.go:64] FLAG: --log-cadvisor-usage="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752607 4818 flags.go:64] FLAG: --log-flush-frequency="5s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752616 4818 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752640 4818 flags.go:64] FLAG: --log-json-split-stream="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752649 4818 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752657 4818 flags.go:64] FLAG: --log-text-split-stream="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752666 4818 flags.go:64] FLAG: --logging-format="text"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752675 4818 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752685 4818 flags.go:64] FLAG: --make-iptables-util-chains="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752693 4818 flags.go:64] FLAG: --manifest-url=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752702 4818 flags.go:64] FLAG: --manifest-url-header=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752714 4818 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752723 4818 flags.go:64] FLAG: --max-open-files="1000000"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752733 4818 flags.go:64] FLAG: --max-pods="110"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752742 4818 flags.go:64] FLAG: --maximum-dead-containers="-1"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752751 4818 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752761 4818 flags.go:64] FLAG: --memory-manager-policy="None"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752770 4818 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752779 4818 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752789 4818 flags.go:64] FLAG: --node-ip="192.168.126.11"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752798 4818 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752816 4818 flags.go:64] FLAG: --node-status-max-images="50"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752825 4818 flags.go:64] FLAG: --node-status-update-frequency="10s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752834 4818 flags.go:64] FLAG: --oom-score-adj="-999"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752843 4818 flags.go:64] FLAG: --pod-cidr=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752853 4818 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752866 4818 flags.go:64] FLAG: --pod-manifest-path=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752875 4818 flags.go:64] FLAG: --pod-max-pids="-1"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752885 4818 flags.go:64] FLAG: --pods-per-core="0"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752893 4818 flags.go:64] FLAG: --port="10250"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752902 4818 flags.go:64] FLAG: --protect-kernel-defaults="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752911 4818 flags.go:64] FLAG: --provider-id=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752945 4818 flags.go:64] FLAG: --qos-reserved=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752954 4818 flags.go:64] FLAG: --read-only-port="10255"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752964 4818 flags.go:64] FLAG: --register-node="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.752973 4818 flags.go:64] FLAG: --register-schedulable="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753012 4818 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753026 4818 flags.go:64] FLAG: --registry-burst="10"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753035 4818 flags.go:64] FLAG: --registry-qps="5"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753044 4818 flags.go:64] FLAG: --reserved-cpus=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753052 4818 flags.go:64] FLAG: --reserved-memory=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753063 4818 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753072 4818 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753081 4818 flags.go:64] FLAG: --rotate-certificates="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753090 4818 flags.go:64] FLAG: --rotate-server-certificates="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753100 4818 flags.go:64] FLAG: --runonce="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753108 4818 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753118 4818 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753127 4818 flags.go:64] FLAG: --seccomp-default="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753135 4818 flags.go:64] FLAG: --serialize-image-pulls="true"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753144 4818 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753153 4818 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753163 4818 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753172 4818 flags.go:64] FLAG: --storage-driver-password="root"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753181 4818 flags.go:64] FLAG: --storage-driver-secure="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753189 4818 flags.go:64] FLAG: --storage-driver-table="stats"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753198 4818 flags.go:64] FLAG: --storage-driver-user="root"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753208 4818 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753217 4818 flags.go:64] FLAG: --sync-frequency="1m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753226 4818 flags.go:64] FLAG: --system-cgroups=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753234 4818 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753249 4818 flags.go:64] FLAG: --system-reserved-cgroup=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753259 4818 flags.go:64] FLAG: --tls-cert-file=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753267 4818 flags.go:64] FLAG: --tls-cipher-suites="[]"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753278 4818 flags.go:64] FLAG: --tls-min-version=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753286 4818 flags.go:64] FLAG: --tls-private-key-file=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753295 4818 flags.go:64] FLAG: --topology-manager-policy="none"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753303 4818 flags.go:64] FLAG: --topology-manager-policy-options=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753312 4818 flags.go:64] FLAG: --topology-manager-scope="container"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753321 4818 flags.go:64] FLAG: --v="2"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753332 4818 flags.go:64] FLAG: --version="false"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753343 4818 flags.go:64] FLAG: --vmodule=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753353 4818 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.753363 4818 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753555 4818 feature_gate.go:330] unrecognized feature gate: PinnedImages
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753565 4818 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753573 4818 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753581 4818 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753589 4818 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753597 4818 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753605 4818 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753613 4818 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753622 4818 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753630 4818 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753639 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753646 4818 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753655 4818 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753663 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753671 4818 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753679 4818 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753687 4818 feature_gate.go:330] unrecognized feature gate: Example
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753694 4818 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753704 4818 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
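[The flags.go:64 dump ending above records only command-line values, before the --config file is merged in; note --cgroup-driver="cgroupfs" here even though the kubelet later adopts the systemd driver reported by the CRI runtime further down. One way to see the merged runtime view is the kubelet's /configz endpoint, read through the API server's node proxy. A sketch using client-go, assuming a kubeconfig with nodes/proxy access; the kubeconfig path is illustrative, the node name "crc" is taken from this log:

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from an admin kubeconfig (path is an assumption).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/admin.kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// The kubelet serves its effective (flags + config file) view at /configz.
	raw, err := client.CoreV1().RESTClient().Get().
		Resource("nodes").Name("crc").SubResource("proxy").
		Suffix("configz").DoRaw(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}
]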
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753713 4818 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753722 4818 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753729 4818 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753739 4818 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753747 4818 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753755 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753763 4818 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753770 4818 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753778 4818 feature_gate.go:330] unrecognized feature gate: NewOLM
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753785 4818 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753793 4818 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753801 4818 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753808 4818 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753816 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753824 4818 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753831 4818 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753839 4818 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753847 4818 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753854 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753862 4818 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753869 4818 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753876 4818 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753884 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753903 4818 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753911 4818 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753947 4818 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753956 4818 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753965 4818 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753973 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753981 4818 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753988 4818 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.753996 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754004 4818 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754015 4818 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754024 4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754034 4818 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754042 4818 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754050 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754058 4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754066 4818 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754075 4818 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754083 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754090 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754101 4818 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754110 4818 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754120 4818 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754129 4818 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754137 4818 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754146 4818 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754155 4818 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754163 4818 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.754172 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.754195 4818 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.768618 4818 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.768672 4818 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768814 4818 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768829 4818 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768840 4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768848 4818 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768858 4818 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768866 4818 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768874 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768882 4818 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768890 4818 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768899 4818 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768907 4818 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768914 4818 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768945 4818 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768953 4818 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768961 4818 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768969 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768977 4818 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768984 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.768992 4818 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769003 4818 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769046 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769056 4818 feature_gate.go:330] unrecognized feature gate: NewOLM
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769064 4818 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769072 4818 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769081 4818 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769089 4818 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769098 4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769108 4818 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769118 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769126 4818 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769134 4818 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769143 4818 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769154 4818 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
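[The feature_gate.go:386 summary above shows what survives the filtering: only gates the kubelet itself registers, with CloudDualStackNodeIPs, DisableKubeletCloudCredentialProviders, KMSv1 and ValidatingAdmissionPolicy forced on. Written as a KubeletConfiguration featureGates stanza it would look roughly like this; a sketch reconstructed from the logged map, not the node's actual config file:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  CloudDualStackNodeIPs: true
  DisableKubeletCloudCredentialProviders: true
  KMSv1: true
  ValidatingAdmissionPolicy: true
  DynamicResourceAllocation: false
  EventedPLEG: false
  NodeSwap: false
  UserNamespacesSupport: false
]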
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769163 4818 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769173 4818 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769182 4818 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769190 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769199 4818 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769206 4818 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769214 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769222 4818 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769230 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769238 4818 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769245 4818 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769253 4818 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769261 4818 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769268 4818 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769276 4818 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769284 4818 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769291 4818 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769299 4818 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769307 4818 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769314 4818 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769322 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769329 4818 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769338 4818 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769347 4818 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769355 4818 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769363 4818 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769374 4818 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769384 4818 feature_gate.go:330] unrecognized feature gate: Example
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769394 4818 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769403 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769414 4818 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769423 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769431 4818 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769439 4818 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769447 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769456 4818 feature_gate.go:330] unrecognized feature gate: PinnedImages
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769464 4818 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769473 4818 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.769486 4818 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769703 4818 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769718 4818 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769727 4818 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769736 4818 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769745 4818 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769753 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769761 4818 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769769 4818 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769777 4818 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769786 4818 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769794 4818 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769801 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769811 4818 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769822 4818 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769831 4818 feature_gate.go:330] unrecognized feature gate: PinnedImages
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769838 4818 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769846 4818 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769854 4818 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769862 4818 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769869 4818 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769877 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769885 4818 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769892 4818 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769901 4818 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769909 4818 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769917 4818 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769950 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769958 4818 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769966 4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769974 4818 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769982 4818 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769990 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.769997 4818 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770005 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770014 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770022 4818 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770030 4818 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770040 4818 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770050 4818 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770058 4818 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770067 4818 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770076 4818 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770084 4818 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770092 4818 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770099 4818 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770107 4818 feature_gate.go:330] unrecognized feature gate: NewOLM
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770115 4818 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770123 4818 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770130 4818 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770138 4818 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770146 4818 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770153 4818 feature_gate.go:330] unrecognized feature gate: Example
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770162 4818 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770169 4818 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770180 4818 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770192 4818 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770202 4818 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770211 4818 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770219 4818 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770229 4818 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770239 4818 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770247 4818 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770256 4818 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770264 4818 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770272 4818 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770280 4818 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770287 4818 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770295 4818 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770303 4818 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770311 4818 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.770319 4818 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.770332 4818 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.771691 4818 server.go:940] "Client rotation is on, will bootstrap in background"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.777957 4818 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.778073 4818 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
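[bootstrap.go:85 above decides that no bootstrap is needed because the on-disk client credentials are still valid. The same check can be reproduced by parsing the pair the kubelet just loaded; a small sketch, assuming read access to the node's PKI directory (the path is the one logged above; the file holds both certificate and key, so we scan for the certificate block):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-client-current.pem")
	if err != nil {
		panic(err)
	}
	// Walk the PEM blocks until the CERTIFICATE entry is found.
	for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			panic(err)
		}
		fmt.Printf("expires %s, still valid: %v\n", cert.NotAfter, time.Now().Before(cert.NotAfter))
		return
	}
	fmt.Println("no certificate block found")
}
]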
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.780020    4818 server.go:997] "Starting client certificate rotation"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.780050    4818 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.781912    4818 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-23 01:53:08.879710361 +0000 UTC
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.782068    4818 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1280h53m55.097647808s for next certificate rotation
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.816564    4818 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.822010    4818 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.848251    4818 log.go:25] "Validated CRI v1 runtime API"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.892219    4818 log.go:25] "Validated CRI v1 image API"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.894980    4818 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.903883    4818 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-09-30-16-55-34-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.903993    4818 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.932250    4818 manager.go:217] Machine: {Timestamp:2025-09-30 16:59:13.929688913 +0000 UTC m=+0.683960809 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:6f343d33-25c9-4fa3-9228-821c4ed396ef BootID:5773b9f7-7ba9-4297-8817-bd7e24295211 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:75:9c:9b Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:75:9c:9b Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:0f:d9:92 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:61:03:fc Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:12:48:c8 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:3a:eb:df Speed:-1 Mtu:1496} {Name:eth10 MacAddress:2e:c9:a1:1e:db:18 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:4e:b3:7a:fe:e8:21 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.932666    4818 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.932957    4818 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.933444    4818 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.933790    4818 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.933850    4818 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.934880    4818 topology_manager.go:138] "Creating topology manager with none policy"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.934914    4818 container_manager_linux.go:303] "Creating device plugin manager"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.935493    4818 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.935535    4818 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.935748    4818 state_mem.go:36] "Initialized new in-memory state store"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.935880    4818 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.940438    4818 kubelet.go:418] "Attempting to sync node with API server"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.940483    4818 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.940644    4818 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.940688    4818 kubelet.go:324] "Adding apiserver pod source"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.940712    4818 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.946122    4818 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.948141    4818 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.949855    4818 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952341    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952385    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952399    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952413    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952436    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952448    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952459    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952478    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952493    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952505    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952523    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.952538    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.957725    4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused
Sep 30 16:59:13 crc kubenswrapper[4818]: E0930 16:59:13.957867    4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.957941    4818 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.957772    4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused
Sep 30 16:59:13 crc kubenswrapper[4818]: E0930 16:59:13.958572    4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.959476    4818 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.960116    4818 server.go:1280] "Started kubelet"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.960440    4818 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.960373    4818 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.962041    4818 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Sep 30 16:59:13 crc systemd[1]: Started Kubernetes Kubelet.
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.963998    4818 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.964063    4818 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.964235    4818 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 23:38:56.900336093 +0000 UTC
Sep 30 16:59:13 crc kubenswrapper[4818]: E0930 16:59:13.964258    4818 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.964271    4818 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 2238h39m42.936067443s for next certificate rotation
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.964403    4818 volume_manager.go:287] "The desired_state_of_world populator starts"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.964436    4818 volume_manager.go:289] "Starting Kubelet Volume Manager"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.964650    4818 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Sep 30 16:59:13 crc kubenswrapper[4818]: W0930 16:59:13.965033    4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused
Sep 30 16:59:13 crc kubenswrapper[4818]: E0930 16:59:13.965101    4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.966373    4818 factory.go:55] Registering systemd factory
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.966402    4818 factory.go:221] Registration of the systemd container factory successfully
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.967399    4818 server.go:460] "Adding debug handlers to kubelet server"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.967408    4818 factory.go:153] Registering CRI-O factory
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.968134    4818 factory.go:221] Registration of the crio container factory successfully
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.968337    4818 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.968484    4818 factory.go:103] Registering Raw factory
Sep 30 16:59:13 crc kubenswrapper[4818]: E0930 16:59:13.968709    4818 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.24:6443: connect: connection refused" interval="200ms"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.968865    4818 manager.go:1196] Started watching for new ooms in manager
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.970137    4818 manager.go:319] Starting recovery of all containers
Sep 30 16:59:13 crc kubenswrapper[4818]: E0930 16:59:13.970162    4818 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.24:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186a1df7a857a261 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-09-30 16:59:13.960043105 +0000 UTC m=+0.714314951,LastTimestamp:2025-09-30 16:59:13.960043105 +0000 UTC m=+0.714314951,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981667    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981722    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981748    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981769    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981792    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981805    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981817    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981832    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981846    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981864    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981878    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981896    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981911    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981952    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981965    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981980    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.981997    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982010    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982028    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982043    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982057    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982069    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982080    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982099    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982114    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982136    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982154    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982172    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982184    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982200    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982212    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982230    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982246    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982261    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982279    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982290    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982305    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982316    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982328    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982345    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982357    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982374    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982390    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982402    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982418    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982430    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982446    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982458    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982475    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982493    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982506    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982522    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982543    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982563    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982681    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982698    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982712    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982725    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982739    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982748    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982759    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982769    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982778    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982794    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982809    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982823    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982841    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982854    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982871    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982884    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982897    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.982915    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986253    4818 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986296    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986317    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986338    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986354    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986373    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986384    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986400    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986419    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986432    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986448    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986468    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986483    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986501    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986518    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986533    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986551    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986565    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986583    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986594    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986605    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986618    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986630    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986644    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986658    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986670    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986683    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986695    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986721    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986732    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986742    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986755    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986766    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986787    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986802    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986814    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986828    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986843    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986854    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986869    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986884    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986897    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986910    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986938    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986951    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986961    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986971    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.986983    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987003    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987015    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987024    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987034    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987045    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987055    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987067    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987076    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987086    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987097    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987109    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987120    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987131    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987140    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987152    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987162    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987173    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987182    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987192    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987203    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987212    4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512"
volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987225 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987234 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987244 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987256 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987265 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987277 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987286 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987295 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987306 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987315 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987327 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987336 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987345 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987357 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987366 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987378 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987388 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987398 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987410 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987420 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987433 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987447 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987456 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987470 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987480 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987489 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987502 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987511 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987523 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987534 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987544 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987560 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987571 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987585 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987597 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987610 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987622 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987631 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987642 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987651 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987661 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987672 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987683 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987700 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987713 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987726 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987739 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987748 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987761 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987770 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987779 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987791 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987801 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987814 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987822 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987831 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987842 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987851 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987861 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987871 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987882 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987893 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987902 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987913 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987948 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987958 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987971 4818 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987980 4818 reconstruct.go:97] "Volume reconstruction finished" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.987987 4818 reconciler.go:26] "Reconciler: start to sync state" Sep 30 16:59:13 crc kubenswrapper[4818]: I0930 16:59:13.999311 4818 manager.go:324] Recovery completed Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.012698 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.015270 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.015341 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.015360 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.016803 4818 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.016717 4818 cpu_manager.go:225] "Starting CPU manager" policy="none" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.017317 4818 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.017393 4818 state_mem.go:36] "Initialized new in-memory state store" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.018785 4818 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.018886 4818 status_manager.go:217] "Starting to sync pod status with apiserver" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.019104 4818 kubelet.go:2335] "Starting kubelet main sync loop" Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.019231 4818 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Sep 30 16:59:14 crc kubenswrapper[4818]: W0930 16:59:14.019778 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.019905 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.052431 4818 policy_none.go:49] "None policy: Start" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.053835 4818 memory_manager.go:170] "Starting memorymanager" policy="None" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.053910 4818 state_mem.go:35] "Initializing new in-memory state store" Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.064395 4818 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.119868 4818 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.121452 4818 manager.go:334] "Starting Device Plugin manager" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.121618 4818 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.121710 4818 server.go:79] "Starting device plugin registration server" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.122404 4818 eviction_manager.go:189] "Eviction manager: starting control loop" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.122521 4818 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.122854 4818 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.123050 4818 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.123128 4818 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.138210 4818 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.169678 4818 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.24:6443: connect: connection refused" interval="400ms" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.223248 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.225121 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.225179 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.225198 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.225239 4818 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.226000 4818 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.24:6443: connect: connection refused" node="crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.320466 4818 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.320662 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.322900 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.322997 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.323016 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.323262 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.323756 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.323842 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.324762 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.324799 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.324808 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.324960 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.325189 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.325247 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.325549 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.325579 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.325591 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326002 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326019 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326027 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326098 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326253 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326305 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326461 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326483 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326491 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326778 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326797 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326804 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.326915 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.327164 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.327203 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328282 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328311 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328328 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328435 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328592 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328626 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328798 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.328858 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.329812 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.329838 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.329846 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.330127 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.330145 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.330153 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.393616 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.393707 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.393752 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.393783 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.393813 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.393841 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 
16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.393970 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394028 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394049 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394069 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394090 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394121 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394136 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394154 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.394213 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.426590 4818 
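Each static pod volume moves through a three-step lifecycle in these entries: operationExecutor.VerifyControllerAttachedVolume above, then operationExecutor.MountVolume started, then MountVolume.SetUp succeeded in the block that follows. A small sketch for pulling one pod's lifecycle out of a capture like this (the pod UID is taken from the etcd-crc entries above; the log path is an assumption):

    // trace_mounts.go — a sketch, not kubelet code: prints which lifecycle
    // phase each matching entry records for one static pod's volumes.
    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"strings"
    )

    func main() {
    	const uid = "2139d3e2895fc6797b9c76a1b4c9886d" // etcd-crc, from the entries above
    	f, err := os.Open("kubelet.log")               // assumed location of this capture
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	sc := bufio.NewScanner(f)
    	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
    	for sc.Scan() {
    		line := sc.Text()
    		if !strings.Contains(line, uid) {
    			continue
    		}
    		for _, phase := range []string{
    			"VerifyControllerAttachedVolume started",
    			"MountVolume started",
    			"MountVolume.SetUp succeeded",
    		} {
    			if strings.Contains(line, phase) {
    				fmt.Println(phase) // one line per phase transition for this pod
    			}
    		}
    	}
    }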
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.426590 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.428812 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.428859 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.428873 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.428902 4818 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.429437 4818 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.24:6443: connect: connection refused" node="crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.494960 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495016 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495041 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495060 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495210 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495260 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495283 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495310 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495211 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495399 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495419 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495444 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495524 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495560 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495567 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495618 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495667 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495641 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495636 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495671 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495645 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495765 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495711 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495802 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495836 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495868 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495878 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.495947 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.496012 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.496067 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.571135 4818 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.24:6443: connect: connection refused" interval="800ms"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.668182 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.697104 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.711068 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: W0930 16:59:14.722676 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-bd8f757204c3aceb7c84b7ca6326659cbd82f362bc714449826841c542deb815 WatchSource:0}: Error finding container bd8f757204c3aceb7c84b7ca6326659cbd82f362bc714449826841c542deb815: Status 404 returned error can't find the container with id bd8f757204c3aceb7c84b7ca6326659cbd82f362bc714449826841c542deb815
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.738096 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Sep 30 16:59:14 crc kubenswrapper[4818]: W0930 16:59:14.744868 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-cd1fdcaab456e4fc5d94440708fcbebc689c78a465f7408ab4dd6181a2310bee WatchSource:0}: Error finding container cd1fdcaab456e4fc5d94440708fcbebc689c78a465f7408ab4dd6181a2310bee: Status 404 returned error can't find the container with id cd1fdcaab456e4fc5d94440708fcbebc689c78a465f7408ab4dd6181a2310bee
Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.747820 4818 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-etcd/etcd-crc" Sep 30 16:59:14 crc kubenswrapper[4818]: W0930 16:59:14.750665 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-9d473c25947955180e341fc367731d807ebfdb10a1e852f3d728eff4afb1b99d WatchSource:0}: Error finding container 9d473c25947955180e341fc367731d807ebfdb10a1e852f3d728eff4afb1b99d: Status 404 returned error can't find the container with id 9d473c25947955180e341fc367731d807ebfdb10a1e852f3d728eff4afb1b99d Sep 30 16:59:14 crc kubenswrapper[4818]: W0930 16:59:14.766864 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-808873777d8433bf9e2aa0c43d47befb1021e1b0ea555304ca34ce7114165ba4 WatchSource:0}: Error finding container 808873777d8433bf9e2aa0c43d47befb1021e1b0ea555304ca34ce7114165ba4: Status 404 returned error can't find the container with id 808873777d8433bf9e2aa0c43d47befb1021e1b0ea555304ca34ce7114165ba4 Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.830096 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.831945 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.831974 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.831982 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.832004 4818 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 30 16:59:14 crc kubenswrapper[4818]: E0930 16:59:14.832474 4818 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.24:6443: connect: connection refused" node="crc" Sep 30 16:59:14 crc kubenswrapper[4818]: I0930 16:59:14.961451 4818 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.024414 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"bd83afc7bca4c9953edfd9b7de59a450228262604bef96f091c291159d55109e"} Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.025670 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9d473c25947955180e341fc367731d807ebfdb10a1e852f3d728eff4afb1b99d"} Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.027360 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cd1fdcaab456e4fc5d94440708fcbebc689c78a465f7408ab4dd6181a2310bee"} Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 
16:59:15.028547 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bd8f757204c3aceb7c84b7ca6326659cbd82f362bc714449826841c542deb815"} Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.030398 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"808873777d8433bf9e2aa0c43d47befb1021e1b0ea555304ca34ce7114165ba4"} Sep 30 16:59:15 crc kubenswrapper[4818]: W0930 16:59:15.149688 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:15 crc kubenswrapper[4818]: E0930 16:59:15.149832 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError" Sep 30 16:59:15 crc kubenswrapper[4818]: W0930 16:59:15.197419 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:15 crc kubenswrapper[4818]: E0930 16:59:15.197505 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError" Sep 30 16:59:15 crc kubenswrapper[4818]: W0930 16:59:15.364113 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:15 crc kubenswrapper[4818]: E0930 16:59:15.364263 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError" Sep 30 16:59:15 crc kubenswrapper[4818]: E0930 16:59:15.371916 4818 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.24:6443: connect: connection refused" interval="1.6s" Sep 30 16:59:15 crc kubenswrapper[4818]: W0930 16:59:15.464026 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:15 crc kubenswrapper[4818]: E0930 16:59:15.464155 4818 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError" Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.632912 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.634638 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.634683 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.634701 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.634739 4818 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 30 16:59:15 crc kubenswrapper[4818]: E0930 16:59:15.635601 4818 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.24:6443: connect: connection refused" node="crc" Sep 30 16:59:15 crc kubenswrapper[4818]: I0930 16:59:15.960838 4818 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.034943 4818 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3ce07b4a364f6dd9eb298f6b28cff43312fff556b2c0144dacfd81b20ce0a245" exitCode=0 Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.035022 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3ce07b4a364f6dd9eb298f6b28cff43312fff556b2c0144dacfd81b20ce0a245"} Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.035089 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.036484 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.036551 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.036570 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.039588 4818 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="cbab9adc4ec15bf97765d816b017f610e4327fc2c0d54bd93886e5f45779ef6a" exitCode=0 Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.039706 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.039645 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"cbab9adc4ec15bf97765d816b017f610e4327fc2c0d54bd93886e5f45779ef6a"} Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.041023 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.041047 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.041060 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.042769 4818 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071" exitCode=0 Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.042816 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071"} Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.042875 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.044896 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.044943 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.044956 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.046254 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb"} Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.046290 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649"} Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.048239 4818 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4" exitCode=0 Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.048271 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4"} Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.048581 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.049967 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.050024 
4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.050041 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.053635 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.055203 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.055248 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.055280 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:16 crc kubenswrapper[4818]: E0930 16:59:16.794599 4818 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.24:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186a1df7a857a261 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-09-30 16:59:13.960043105 +0000 UTC m=+0.714314951,LastTimestamp:2025-09-30 16:59:13.960043105 +0000 UTC m=+0.714314951,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Sep 30 16:59:16 crc kubenswrapper[4818]: I0930 16:59:16.960640 4818 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:16 crc kubenswrapper[4818]: E0930 16:59:16.972916 4818 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.24:6443: connect: connection refused" interval="3.2s" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.053644 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.053686 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.053717 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.053741 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.054737 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.054763 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.054772 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.057957 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.057995 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.058000 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.058620 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.058662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.058676 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.061627 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.061664 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.061675 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.061686 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.064694 4818 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a8a5d8360ac6e488a6a8562b4775da06846e14d991ba91a10bf54d712b58231d" exitCode=0 Sep 30 16:59:17 crc 
kubenswrapper[4818]: I0930 16:59:17.064750 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a8a5d8360ac6e488a6a8562b4775da06846e14d991ba91a10bf54d712b58231d"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.064852 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.065903 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.065959 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.065973 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.066721 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"fe767aa07a81a71568744b34f333457ba6bf7a8aac248c7a05b164fd8daa876d"} Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.066789 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.067519 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.067554 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.067567 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:17 crc kubenswrapper[4818]: W0930 16:59:17.105676 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.24:6443: connect: connection refused Sep 30 16:59:17 crc kubenswrapper[4818]: E0930 16:59:17.105756 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.24:6443: connect: connection refused" logger="UnhandledError" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.236068 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.237246 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.237274 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.237282 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.237306 4818 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 30 16:59:17 crc 
kubenswrapper[4818]: E0930 16:59:17.237713 4818 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.24:6443: connect: connection refused" node="crc" Sep 30 16:59:17 crc kubenswrapper[4818]: I0930 16:59:17.543081 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.073227 4818 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="07a05db447555bcc789b44326b36b21e117f552a1d222b16dcb2f0ef7d51e663" exitCode=0 Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.073366 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.073405 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"07a05db447555bcc789b44326b36b21e117f552a1d222b16dcb2f0ef7d51e663"} Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.074667 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.074726 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.074744 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.079858 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245"} Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.079970 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.079993 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.080011 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.080080 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.080091 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081200 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081242 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081256 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081320 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081346 4818 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081365 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081522 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081566 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081590 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081846 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081891 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.081908 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:18 crc kubenswrapper[4818]: I0930 16:59:18.158966 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.087474 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a22b27fa3a62e59f34d002bbe963243f18e9ba2af279de5e756499a1edb7e8cf"} Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.087531 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"faec6a6303999b36b8c58884c0e29781d4c24d0a4b6cbbdf3dfba3f5bb8b3cae"} Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.087548 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a6479c7bc2687cf28f29f2c2b6655787d99ff5ac2644ccfe9d6a8fd4907a5ab3"} Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.087579 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.087672 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.087681 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.090787 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.090844 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.090871 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.092897 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.092954 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.092970 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.297987 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:19 crc kubenswrapper[4818]: I0930 16:59:19.584621 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.096591 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.096620 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.096665 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.097001 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b905c5bdc5f899562f3527a6d106f397ba1d10e9e6542ae54acdce1f2748c15f"} Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.097051 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.097087 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f1daad1bdffd89f52ed19e6ad9695580be6c9cc5672323275c2aae7c8b7b31f4"} Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.098330 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.098365 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.098382 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.099123 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.099152 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.099163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.099656 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.099813 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.099991 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:20 crc 
kubenswrapper[4818]: I0930 16:59:20.438783 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.440595 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.440668 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.440685 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.440727 4818 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 30 16:59:20 crc kubenswrapper[4818]: I0930 16:59:20.705198 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.099325 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.099469 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.101057 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.101123 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.101144 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.101149 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.101202 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.101228 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.159076 4818 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.159199 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.713904 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.718300 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:21 crc 
kubenswrapper[4818]: I0930 16:59:21.718511 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.720288 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.720349 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.720375 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:21 crc kubenswrapper[4818]: I0930 16:59:21.729428 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.102090 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.102160 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.102107 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.102209 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103630 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103686 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103709 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103640 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103807 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103834 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103688 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103915 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:22 crc kubenswrapper[4818]: I0930 16:59:22.103966 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.053138 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.053473 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.055235 4818 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.055295 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.055315 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.105360 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.107413 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.107487 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:23 crc kubenswrapper[4818]: I0930 16:59:23.107506 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:24 crc kubenswrapper[4818]: E0930 16:59:24.138358 4818 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Sep 30 16:59:24 crc kubenswrapper[4818]: I0930 16:59:24.699984 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Sep 30 16:59:24 crc kubenswrapper[4818]: I0930 16:59:24.700240 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:24 crc kubenswrapper[4818]: I0930 16:59:24.701625 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:24 crc kubenswrapper[4818]: I0930 16:59:24.701660 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:24 crc kubenswrapper[4818]: I0930 16:59:24.701671 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:27 crc kubenswrapper[4818]: W0930 16:59:27.700286 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Sep 30 16:59:27 crc kubenswrapper[4818]: I0930 16:59:27.700386 4818 trace.go:236] Trace[1645490475]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Sep-2025 16:59:17.699) (total time: 10000ms): Sep 30 16:59:27 crc kubenswrapper[4818]: Trace[1645490475]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10000ms (16:59:27.700) Sep 30 16:59:27 crc kubenswrapper[4818]: Trace[1645490475]: [10.000854459s] [10.000854459s] END Sep 30 16:59:27 crc kubenswrapper[4818]: E0930 16:59:27.700416 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Sep 30 16:59:27 crc kubenswrapper[4818]: I0930 16:59:27.961018 4818 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Sep 30 16:59:28 crc kubenswrapper[4818]: W0930 16:59:28.175628 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Sep 30 16:59:28 crc kubenswrapper[4818]: I0930 16:59:28.175789 4818 trace.go:236] Trace[1721203659]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Sep-2025 16:59:18.173) (total time: 10001ms): Sep 30 16:59:28 crc kubenswrapper[4818]: Trace[1721203659]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:59:28.175) Sep 30 16:59:28 crc kubenswrapper[4818]: Trace[1721203659]: [10.001765872s] [10.001765872s] END Sep 30 16:59:28 crc kubenswrapper[4818]: E0930 16:59:28.175855 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Sep 30 16:59:28 crc kubenswrapper[4818]: W0930 16:59:28.214538 4818 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Sep 30 16:59:28 crc kubenswrapper[4818]: I0930 16:59:28.214692 4818 trace.go:236] Trace[1202349181]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Sep-2025 16:59:18.212) (total time: 10001ms): Sep 30 16:59:28 crc kubenswrapper[4818]: Trace[1202349181]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:59:28.214) Sep 30 16:59:28 crc kubenswrapper[4818]: Trace[1202349181]: [10.001837204s] [10.001837204s] END Sep 30 16:59:28 crc kubenswrapper[4818]: E0930 16:59:28.214726 4818 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Sep 30 16:59:28 crc kubenswrapper[4818]: I0930 16:59:28.955761 4818 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Sep 30 16:59:28 crc kubenswrapper[4818]: I0930 16:59:28.955858 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Sep 30 16:59:28 crc kubenswrapper[4818]: I0930 16:59:28.963746 4818 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver 
namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Sep 30 16:59:28 crc kubenswrapper[4818]: I0930 16:59:28.963831 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Sep 30 16:59:29 crc kubenswrapper[4818]: I0930 16:59:29.307119 4818 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]log ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]etcd ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/openshift.io-startkubeinformers ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-apiserver-admission-initializer ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/openshift.io-api-request-count-filter ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/generic-apiserver-start-informers ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/priority-and-fairness-config-consumer ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/priority-and-fairness-filter ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/storage-object-count-tracker-hook ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-apiextensions-informers ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-apiextensions-controllers ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/crd-informer-synced ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-system-namespaces-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-cluster-authentication-info-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-legacy-token-tracking-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-service-ip-repair-controllers ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Sep 30 16:59:29 crc kubenswrapper[4818]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/priority-and-fairness-config-producer ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/bootstrap-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/start-kube-aggregator-informers ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/apiservice-status-local-available-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/apiservice-status-remote-available-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/apiservice-registration-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/apiservice-wait-for-first-sync ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/apiservice-discovery-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/kube-apiserver-autoregistration ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]autoregister-completion ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/apiservice-openapi-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: [+]poststarthook/apiservice-openapiv3-controller ok
Sep 30 16:59:29 crc kubenswrapper[4818]: livez check failed
Sep 30 16:59:29 crc kubenswrapper[4818]: I0930 16:59:29.307262 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.159859 4818 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.160092 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.440458 4818 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.753380 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.753669 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.755836 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.755958 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.755981 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:31 crc kubenswrapper[4818]: I0930 16:59:31.774378 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Sep 30 16:59:32 crc kubenswrapper[4818]: I0930 16:59:32.137760 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Sep 30 16:59:32 crc kubenswrapper[4818]: I0930 16:59:32.139144 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:32 crc kubenswrapper[4818]: I0930 16:59:32.139177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:32 crc kubenswrapper[4818]: I0930 16:59:32.139191 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.162342 4818 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.615989 4818 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Sep 30 16:59:33 crc kubenswrapper[4818]: E0930 16:59:33.950682 4818 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.952014 4818 apiserver.go:52] "Watching apiserver"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.956780 4818 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Sep 30 16:59:33 crc kubenswrapper[4818]: E0930 16:59:33.962186 4818 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.965769 4818 trace.go:236] Trace[1317996347]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Sep-2025 16:59:22.710) (total time: 11255ms):
Sep 30 16:59:33 crc kubenswrapper[4818]: Trace[1317996347]: ---"Objects listed" error:<nil> 11255ms (16:59:33.965)
Sep 30 16:59:33 crc kubenswrapper[4818]: Trace[1317996347]: [11.255641067s] [11.255641067s] END
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.965838 4818 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.968629 4818 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.972282 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.972892 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.972958 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.973090 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.973140 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.973190 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 16:59:33 crc kubenswrapper[4818]: E0930 16:59:33.973278 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.973414 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Sep 30 16:59:33 crc kubenswrapper[4818]: E0930 16:59:33.973658 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 16:59:33 crc kubenswrapper[4818]: E0930 16:59:33.974185 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.976717 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.978752 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.978893 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.979044 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.979142 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.979228 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.979595 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.979816 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Sep 30 16:59:33 crc kubenswrapper[4818]: I0930 16:59:33.980008 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.013138 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.016296 4818 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50082->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.016536 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50082->192.168.126.11:17697: read: connection reset by peer"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.050510 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.065459 4818 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.072469 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.089669 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.105061 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.115707 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.129868 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.144111 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.148109 4818 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245" exitCode=255
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.148165 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245"}
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.149585 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.157685 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.157749 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.157817 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.158459 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.158511 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.158596 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.158720 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.158538 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.158981 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.159086 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.159119 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.159149 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.159978 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.159523 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160047 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160106 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160162 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160200 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160246 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160281 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160315 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.159537 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160354 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160407 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160449 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160489 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160524 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160560 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160597 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160638 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160673 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160717 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160754 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160788 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160821 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160857 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.159540 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160894 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160323 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160312 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160411 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160537 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160961 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161005 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161047 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161084 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161121 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161170 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161207 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161241 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161337 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161373 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161411 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161445 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161484 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161522 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161569 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161607 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161644 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161678 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161904 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161976 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162014 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162053 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162089 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162125 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162163 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162199 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162267 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162300 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162335 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162372 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162405 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162436 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162468 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162502 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162543 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162574 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162612 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162651 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162689 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162724 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162762 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162801 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162839 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162875 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162915 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162991 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163033 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163076 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163118 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163155 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163228 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163266 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163303 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163342 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160707 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7".
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163377 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163493 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163534 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163572 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163608 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163643 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163683 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163717 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163782 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163820 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod 
\"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163858 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164019 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164062 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164099 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164137 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164174 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164209 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164242 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164274 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164315 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164357 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164393 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164426 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164461 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164548 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164587 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164664 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165024 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165166 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165214 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: 
\"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165249 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165386 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165426 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165460 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165494 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165526 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165564 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165606 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165642 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165677 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" 
(UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165712 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165793 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165833 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165871 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165912 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165971 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166008 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166048 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166087 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166129 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: 
\"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166169 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166208 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166249 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166286 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166321 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166357 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166394 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166432 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166471 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166509 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166547 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166585 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166621 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166668 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166706 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166743 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166778 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166815 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166852 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166888 4818 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166948 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166990 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167027 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167069 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167108 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167143 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167180 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167244 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167282 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167320 4818 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167373 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167414 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167456 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167498 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167539 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167579 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167617 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167658 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167695 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Sep 30 16:59:34 crc 
kubenswrapper[4818]: I0930 16:59:34.167733 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167776 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167821 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167861 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167899 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167966 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168006 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168253 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168293 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168328 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") 
pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168372 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168412 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168473 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168510 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168543 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168581 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168623 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168659 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168696 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168733 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168769 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168869 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.169021 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163386 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160725 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.160879 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161015 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161179 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161389 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161435 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161614 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161683 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161840 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.161965 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162080 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162329 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162606 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162643 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162704 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.162903 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163259 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163415 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163530 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163815 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.163858 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164004 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164273 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164329 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164617 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164812 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165282 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). 
InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.164797 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165532 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165806 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165843 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.165998 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166111 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166173 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166345 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166872 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.166985 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167045 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167331 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167374 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167464 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167707 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). 
InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167815 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.167916 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168335 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168348 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.168637 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.168960 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 16:59:34.668936296 +0000 UTC m=+21.423208122 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189030 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189078 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189103 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189136 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189156 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189183 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189203 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189222 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189240 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189259 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189277 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189301 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189320 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189451 4818 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189464 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189456 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189477 4818 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189525 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189549 4818 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189574 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189595 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.189644 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189685 4818 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.189705 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:34.689687121 +0000 UTC m=+21.443958927 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189739 4818 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189763 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.188650 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189811 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.189776 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.190087 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.190134 4818 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.169431 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.169659 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.170000 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.170662 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.170977 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.171015 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.171098 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.171376 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.171523 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.171951 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.171810 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.172078 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.172313 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.173673 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.173739 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.173816 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.173755 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.174087 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.177308 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.177641 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.178199 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180310 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180348 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180486 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180570 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180585 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180654 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180845 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.180856 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.181165 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.181406 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.181529 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.181620 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.181731 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.181789 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182013 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182108 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182123 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182492 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182482 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.190440 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.190331 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182705 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182882 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.183000 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.183032 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.183442 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.183701 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.184068 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.184102 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.184348 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.184743 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.184817 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.185088 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.185253 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.185299 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.185615 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.186736 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.187522 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.187555 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.188708 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.182565 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.190834 4818 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.190908 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.190979 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191173 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191254 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.191434 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:34.691410413 +0000 UTC m=+21.445682419 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191466 4818 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191464 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191480 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). 
InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191491 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191622 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191878 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.191825 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.192275 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.192274 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.192479 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.192503 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193290 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193332 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193354 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193378 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193400 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193422 4818 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193443 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193463 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193500 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193522 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193543 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193563 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193584 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193605 4818 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193625 4818 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193646 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193666 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193687 4818 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193745 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193779 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193806 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193828 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193848 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193866 4818 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193885 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193914 4818 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193969 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.193997 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194025 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194043 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194053 4818 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194131 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194164 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194192 4818 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194221 4818 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194249 4818 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194275 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194300 4818 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194323 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194348 4818 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194371 4818 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194396 4818 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194419 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node 
\"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194443 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194467 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194894 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.194939 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.198782 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.198965 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.199018 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.200435 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.208815 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). 
InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.200248 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.208912 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.209104 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.210607 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.210688 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.210728 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.210754 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.210840 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:34.710809255 +0000 UTC m=+21.465081071 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.212074 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.212276 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.213011 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.213582 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.214524 4818 scope.go:117] "RemoveContainer" containerID="52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.214613 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.214652 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.215694 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.216225 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.216257 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.218219 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.218971 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.219008 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.219067 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.219134 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:34.719110517 +0000 UTC m=+21.473382513 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.221113 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.223441 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.223622 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.224085 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.224406 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.226313 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.226978 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.227005 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.228495 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.228520 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.228617 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.228697 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.228840 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.228880 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.229420 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.229857 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.230141 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.231599 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.232566 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.232795 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.233192 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.233370 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.233430 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.233886 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.234038 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.234017 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.234307 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.234852 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.236094 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.236299 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.236520 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.236820 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.238326 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.239581 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.239750 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.239849 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.240567 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.242396 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.242674 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.243234 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.243897 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.244127 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.247340 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.249042 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.279643 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.286746 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.290882 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.295947 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.295999 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296057 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296083 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296103 4818 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296119 4818 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296130 4818 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296141 4818 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc 
kubenswrapper[4818]: I0930 16:59:34.296151 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296162 4818 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296176 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296187 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296199 4818 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296213 4818 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296224 4818 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296233 4818 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296244 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296257 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296198 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296268 4818 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296313 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296327 4818 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296340 4818 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296351 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296365 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296377 4818 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296394 4818 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296405 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296415 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296427 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296440 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296451 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296464 4818 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296474 4818 
reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296484 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296494 4818 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296505 4818 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296514 4818 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296525 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296535 4818 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296546 4818 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296557 4818 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296570 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296581 4818 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296594 4818 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296604 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296615 4818 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296625 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296636 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296648 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296660 4818 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296671 4818 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296681 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296693 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296707 4818 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296719 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296729 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296740 4818 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296750 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296760 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: 
\"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296771 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296781 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296791 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296802 4818 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296814 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296824 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296834 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296845 4818 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296855 4818 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296865 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296877 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296890 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296900 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: 
\"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296910 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296937 4818 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296949 4818 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296961 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296971 4818 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296983 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.296993 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297003 4818 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297015 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297032 4818 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297043 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297053 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297065 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: 
\"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297075 4818 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297085 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297098 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297108 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297118 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297130 4818 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297140 4818 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297149 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297159 4818 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297170 4818 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297181 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297195 4818 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297206 4818 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297217 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297227 4818 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297238 4818 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297249 4818 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297259 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297269 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297280 4818 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297291 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297301 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297313 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297339 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297349 4818 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297361 4818 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" 
(UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297371 4818 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297381 4818 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297394 4818 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297405 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297415 4818 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297424 4818 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297433 4818 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297444 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297456 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297467 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297479 4818 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297488 4818 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297498 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" 
(UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297507 4818 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297517 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297527 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297537 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297547 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297557 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297566 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297576 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297587 4818 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297596 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297638 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297649 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297659 4818 reconciler_common.go:293] "Volume detached for volume 
\"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297670 4818 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297679 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.297689 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.300810 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.304734 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.313711 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: W0930 16:59:34.314090 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-42d152eefd19cfb9d455d83d736540f3cbd86913e56e10acc61a6c8b45437487 WatchSource:0}: Error finding container 42d152eefd19cfb9d455d83d736540f3cbd86913e56e10acc61a6c8b45437487: Status 404 returned error can't find the container with id 42d152eefd19cfb9d455d83d736540f3cbd86913e56e10acc61a6c8b45437487 Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.321555 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.325627 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.331251 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.337211 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: W0930 16:59:34.347424 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-89e71e53f2709411c720be8328a04df5eb70fb7dc507fe56e8ee5942f4bcc252 WatchSource:0}: Error finding container 89e71e53f2709411c720be8328a04df5eb70fb7dc507fe56e8ee5942f4bcc252: Status 404 returned error can't find the container with id 89e71e53f2709411c720be8328a04df5eb70fb7dc507fe56e8ee5942f4bcc252 Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.348255 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.358820 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.369784 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.377597 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.591703 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.603588 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.613661 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.631178 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.648469 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.660253 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.674461 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.700030 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.700142 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.700196 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.700396 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.700465 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:35.700445652 +0000 UTC m=+22.454717488 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.700949 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-09-30 16:59:35.700909944 +0000 UTC m=+22.455181780 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.701011 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.701051 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:35.701038737 +0000 UTC m=+22.455310573 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.723233 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.724276 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30
T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.800883 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.800970 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.801121 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 
16:59:34.801141 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.801154 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.801150 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.801193 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.801206 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.801211 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:35.801195614 +0000 UTC m=+22.555467450 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: E0930 16:59:34.801275 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:35.801253496 +0000 UTC m=+22.555525362 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.887477 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.897037 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.907840 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.923437 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.939110 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.955457 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.973218 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:34 crc kubenswrapper[4818]: I0930 16:59:34.989208 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.003546 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30
T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.019974 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.020212 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.154943 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.158335 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3"} Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.158553 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.159563 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"89e71e53f2709411c720be8328a04df5eb70fb7dc507fe56e8ee5942f4bcc252"} Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.161737 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef"} Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.161786 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc"} Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.161805 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c9108f875367cbd5c9a1fb015e72e8b7b3090485e99e8abf00674e278beac9dd"} Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.163504 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c"} Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.164148 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"42d152eefd19cfb9d455d83d736540f3cbd86913e56e10acc61a6c8b45437487"} Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.177843 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.196426 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.221959 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.235101 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-contr
oller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.251851 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.263235 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.276299 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.289606 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.314396 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.329981 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.348194 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.363511 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.376218 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.394476 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.417643 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.433024 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:35Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.710054 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.710150 4818 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.710184 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.710255 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.710310 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 16:59:37.71026839 +0000 UTC m=+24.464540236 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.710363 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:37.710347892 +0000 UTC m=+24.464619738 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.710548 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.710698 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:37.71066316 +0000 UTC m=+24.464935016 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.810586 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:35 crc kubenswrapper[4818]: I0930 16:59:35.810654 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.810799 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.810819 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.810823 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.810865 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.810877 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.810833 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.810957 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:37.810940561 +0000 UTC m=+24.565212377 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:35 crc kubenswrapper[4818]: E0930 16:59:35.811035 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:37.811016973 +0000 UTC m=+24.565288889 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.019574 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:36 crc kubenswrapper[4818]: E0930 16:59:36.019691 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.019790 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:36 crc kubenswrapper[4818]: E0930 16:59:36.020024 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.024039 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.025198 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.026855 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.027690 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.029192 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.029870 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.030760 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.032110 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.032961 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.034343 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.034845 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.035500 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.036002 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.036502 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.036998 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.037476 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.037994 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.038361 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.038876 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.039415 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.039821 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.040361 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.040756 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.041397 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.041768 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.042329 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.042899 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.046383 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.046897 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.047671 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.048116 4818 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.048259 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.050114 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.050607 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.051022 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.052411 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.053320 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.053780 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.054798 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.055409 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.056202 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.056751 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.057674 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.058241 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.059011 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.059499 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.060327 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.060999 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.061768 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.062308 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.063095 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.063592 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.064120 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Sep 30 16:59:36 crc kubenswrapper[4818]: I0930 16:59:36.064893 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.019562 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.019800 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.171100 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1"} Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.192333 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.213063 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.230123 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.254588 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.279740 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.299393 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-contr
oller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.320950 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.346426 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.731118 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.731322 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.731397 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.731495 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 16:59:41.731460343 +0000 UTC m=+28.485732199 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.731543 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.731609 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:41.731587997 +0000 UTC m=+28.485859813 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.731672 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.731760 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:41.73172073 +0000 UTC m=+28.485992586 (durationBeforeRetry 4s). 
Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.832952 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 16:59:37 crc kubenswrapper[4818]: I0930 16:59:37.833028 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.833231 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.833254 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.833274 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.833331 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:41.833314573 +0000 UTC m=+28.587586409 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
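The status-update failures that bracket these entries all reduce to one TLS check: the serving certificate behind the pod.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, so every Post to https://127.0.0.1:9743/pod fails verification. A short Go sketch of the validity-window test that yields "x509: certificate has expired or is not yet valid"; the tls.crt filename under the webhook-cert mount path seen in the log is an assumption for illustration:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Path assumed from the /etc/webhook-cert/ volumeMount in the log.
	pemBytes, err := os.ReadFile("/etc/webhook-cert/tls.crt")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
			now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	case now.After(cert.NotAfter):
		// This is the branch reported throughout this log window:
		// "current time 2025-09-30T16:59:37Z is after 2025-08-24T17:21:41Z"
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}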
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.833782 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.833865 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.833886 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 16:59:37 crc kubenswrapper[4818]: E0930 16:59:37.834073 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:41.83404222 +0000 UTC m=+28.588314196 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.019985 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.020114 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 16:59:38 crc kubenswrapper[4818]: E0930 16:59:38.020219 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 16:59:38 crc kubenswrapper[4818]: E0930 16:59:38.020328 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
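The two NetworkReady=false entries above mean the container runtime found no CNI network configuration in /etc/kubernetes/cni/net.d/, so it refuses to create new pod sandboxes until the network provider writes one. A minimal sketch of that readiness check; the accepted file extensions follow libcni's usual conventions and are an assumption here, not taken from this log:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the log entries above
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	var confs []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni conventionally loads (assumed)
			confs = append(confs, e.Name())
		}
	}
	if len(confs) == 0 {
		// Corresponds to: NetworkReady=false reason:NetworkPluginNotReady
		fmt.Println("NetworkReady=false: no CNI configuration file found in", dir)
		return
	}
	fmt.Println("CNI configurations present:", confs)
}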
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.166644 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.172860 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.186613 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.201968 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.221706 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.238189 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.252259 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.264131 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-contro
ller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.277080 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.291621 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.307586 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.324716 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.342870 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.357056 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.398104 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.435872 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.454894 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.467071 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.818158 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-vmncz"] Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.818436 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.820153 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.820354 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.820463 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.820668 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.836127 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.850054 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.869893 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.882520 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.896934 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.910469 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.927227 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.941255 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/85fe3d18-20dd-467f-be69-fcaa139126f9-serviceca\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.941539 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85fe3d18-20dd-467f-be69-fcaa139126f9-host\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.941664 4818 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9zrn\" (UniqueName: \"kubernetes.io/projected/85fe3d18-20dd-467f-be69-fcaa139126f9-kube-api-access-s9zrn\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.943956 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:38 crc kubenswrapper[4818]: I0930 16:59:38.963742 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:38Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.020312 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:39 crc kubenswrapper[4818]: E0930 16:59:39.020464 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.042184 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/85fe3d18-20dd-467f-be69-fcaa139126f9-serviceca\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.042240 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85fe3d18-20dd-467f-be69-fcaa139126f9-host\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.042256 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9zrn\" (UniqueName: \"kubernetes.io/projected/85fe3d18-20dd-467f-be69-fcaa139126f9-kube-api-access-s9zrn\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.042421 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85fe3d18-20dd-467f-be69-fcaa139126f9-host\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.044242 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/85fe3d18-20dd-467f-be69-fcaa139126f9-serviceca\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.059459 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9zrn\" (UniqueName: \"kubernetes.io/projected/85fe3d18-20dd-467f-be69-fcaa139126f9-kube-api-access-s9zrn\") pod \"node-ca-vmncz\" (UID: \"85fe3d18-20dd-467f-be69-fcaa139126f9\") " pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.129420 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vmncz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.180448 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vmncz" event={"ID":"85fe3d18-20dd-467f-be69-fcaa139126f9","Type":"ContainerStarted","Data":"202b90c96b16a064747104eaa98e4a9af713dd5b0b4773648a4d571674052507"} Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.198439 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-gd5fd"] Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.198982 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.202567 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.203023 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.204251 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.223011 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.240286 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.244457 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/17ee0898-ae49-455e-b283-185058ad07b0-hosts-file\") pod \"node-resolver-gd5fd\" (UID: \"17ee0898-ae49-455e-b283-185058ad07b0\") " pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.244514 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxpnv\" (UniqueName: \"kubernetes.io/projected/17ee0898-ae49-455e-b283-185058ad07b0-kube-api-access-fxpnv\") pod \"node-resolver-gd5fd\" (UID: \"17ee0898-ae49-455e-b283-185058ad07b0\") " pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 
16:59:39.267283 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"
containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.285345 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.305462 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.321443 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.332065 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.343144 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.345195 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/17ee0898-ae49-455e-b283-185058ad07b0-hosts-file\") pod \"node-resolver-gd5fd\" (UID: \"17ee0898-ae49-455e-b283-185058ad07b0\") " pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.345248 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-fxpnv\" (UniqueName: \"kubernetes.io/projected/17ee0898-ae49-455e-b283-185058ad07b0-kube-api-access-fxpnv\") pod \"node-resolver-gd5fd\" (UID: \"17ee0898-ae49-455e-b283-185058ad07b0\") " pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.345463 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/17ee0898-ae49-455e-b283-185058ad07b0-hosts-file\") pod \"node-resolver-gd5fd\" (UID: \"17ee0898-ae49-455e-b283-185058ad07b0\") " pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.355201 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" 
for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.360307 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxpnv\" (UniqueName: \"kubernetes.io/projected/17ee0898-ae49-455e-b283-185058ad07b0-kube-api-access-fxpnv\") pod \"node-resolver-gd5fd\" (UID: \"17ee0898-ae49-455e-b283-185058ad07b0\") " pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.370570 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.526817 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-gd5fd" Sep 30 16:59:39 crc kubenswrapper[4818]: W0930 16:59:39.540103 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17ee0898_ae49_455e_b283_185058ad07b0.slice/crio-e7cfdabda7be6d78e4890879897bf395d84bb60f1660d99036c2b4cfe452f8a2 WatchSource:0}: Error finding container e7cfdabda7be6d78e4890879897bf395d84bb60f1660d99036c2b4cfe452f8a2: Status 404 returned error can't find the container with id e7cfdabda7be6d78e4890879897bf395d84bb60f1660d99036c2b4cfe452f8a2 Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.612588 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-vc6ss"] Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.612919 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-wzw6f"] Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.613123 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.614174 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-hq6j2"] Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.614338 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.614527 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.618579 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.618772 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.618985 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.619111 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.619204 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.619544 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.619796 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.619912 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.620040 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.620256 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 
16:59:39.620365 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.620460 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.633706 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-ljmfd"] Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.635883 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.640608 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.641343 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.641492 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.647973 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.648208 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.648318 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.648455 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.648460 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.677446 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.690762 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is 
after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.712496 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.736996 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750708 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8ckz\" (UniqueName: \"kubernetes.io/projected/d36fce8a-ff27-48bf-be9c-67fc2046136d-kube-api-access-w8ckz\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750744 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-netd\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750761 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750779 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-tuning-conf-dir\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750797 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-cni-multus\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750812 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750827 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8dd02846-2628-4200-a7fe-886042bd15bb-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750843 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-slash\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750861 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-socket-dir-parent\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750896 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-script-lib\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750911 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-cnibin\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750941 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-k8s-cni-cncf-io\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750955 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-etc-kubernetes\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750969 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-kubelet\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: 
I0930 16:59:39.750983 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-multus-certs\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.750998 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsxbk\" (UniqueName: \"kubernetes.io/projected/8dd02846-2628-4200-a7fe-886042bd15bb-kube-api-access-gsxbk\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751013 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5e908152-dcb2-4b41-974d-26b03ae0254b-mcd-auth-proxy-config\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751030 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-log-socket\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751056 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-netns\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751071 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-daemon-config\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751084 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-node-log\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751117 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-cnibin\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751134 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-os-release\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: 
\"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751147 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-config\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751160 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovn-node-metrics-cert\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751174 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8dd02846-2628-4200-a7fe-886042bd15bb-cni-binary-copy\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751194 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-systemd-units\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751207 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-systemd\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751221 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-os-release\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751235 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-conf-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751250 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-netns\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751271 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-kubelet\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751288 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-var-lib-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751304 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-ovn\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751319 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-system-cni-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751340 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxqgg\" (UniqueName: \"kubernetes.io/projected/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-kube-api-access-vxqgg\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751355 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-etc-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751369 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-env-overrides\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751385 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-system-cni-dir\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751401 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdxhd\" (UniqueName: \"kubernetes.io/projected/5e908152-dcb2-4b41-974d-26b03ae0254b-kube-api-access-qdxhd\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751415 4818 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-bin\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751430 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d36fce8a-ff27-48bf-be9c-67fc2046136d-cni-binary-copy\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751445 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-cni-bin\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751460 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5e908152-dcb2-4b41-974d-26b03ae0254b-rootfs\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751474 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5e908152-dcb2-4b41-974d-26b03ae0254b-proxy-tls\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751489 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-cni-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751503 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-hostroot\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.751517 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-ovn-kubernetes\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.755382 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.769833 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.784261 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.796624 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.808834 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 
16:59:39.825116 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.835856 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.847387 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852763 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-hostroot\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852795 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-ovn-kubernetes\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852814 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8ckz\" (UniqueName: \"kubernetes.io/projected/d36fce8a-ff27-48bf-be9c-67fc2046136d-kube-api-access-w8ckz\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852829 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-netd\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852865 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-hostroot\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852906 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-netd\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852878 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-ovn-kubernetes\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852891 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.852981 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-tuning-conf-dir\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853000 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-cni-multus\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853025 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853032 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-cni-multus\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853000 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853049 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8dd02846-2628-4200-a7fe-886042bd15bb-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853072 4818 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-slash\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853078 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853088 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-socket-dir-parent\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853105 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-slash\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853118 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-script-lib\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853138 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-cnibin\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853152 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-k8s-cni-cncf-io\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853166 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-etc-kubernetes\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853184 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-kubelet\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853202 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: 
\"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-multus-certs\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853218 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsxbk\" (UniqueName: \"kubernetes.io/projected/8dd02846-2628-4200-a7fe-886042bd15bb-kube-api-access-gsxbk\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853220 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-cnibin\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853233 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5e908152-dcb2-4b41-974d-26b03ae0254b-mcd-auth-proxy-config\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853249 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-log-socket\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853239 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-socket-dir-parent\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853275 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-netns\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853290 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-daemon-config\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853266 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-etc-kubernetes\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853304 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-node-log\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853250 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-kubelet\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853307 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-k8s-cni-cncf-io\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853348 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-netns\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853326 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-cnibin\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853408 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-os-release\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853414 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-run-multus-certs\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853429 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-config\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853443 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-log-socket\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853456 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovn-node-metrics-cert\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853485 4818 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8dd02846-2628-4200-a7fe-886042bd15bb-cni-binary-copy\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853507 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-systemd-units\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853528 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-systemd\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853559 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-os-release\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853581 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-conf-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853601 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-netns\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853644 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-kubelet\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853659 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-var-lib-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853675 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-ovn\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853678 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-os-release\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853693 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-system-cni-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853718 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-os-release\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853721 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxqgg\" (UniqueName: \"kubernetes.io/projected/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-kube-api-access-vxqgg\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853741 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-etc-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853762 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-env-overrides\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853786 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-system-cni-dir\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853804 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8dd02846-2628-4200-a7fe-886042bd15bb-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853809 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdxhd\" (UniqueName: \"kubernetes.io/projected/5e908152-dcb2-4b41-974d-26b03ae0254b-kube-api-access-qdxhd\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853849 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-bin\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853867 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d36fce8a-ff27-48bf-be9c-67fc2046136d-cni-binary-copy\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853884 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-cni-bin\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853903 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5e908152-dcb2-4b41-974d-26b03ae0254b-rootfs\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853941 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5e908152-dcb2-4b41-974d-26b03ae0254b-proxy-tls\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853950 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-script-lib\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.853958 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-cni-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854000 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-cni-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854035 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5e908152-dcb2-4b41-974d-26b03ae0254b-mcd-auth-proxy-config\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854040 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-host-var-lib-cni-bin\") pod \"multus-hq6j2\" (UID: 
\"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854059 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5e908152-dcb2-4b41-974d-26b03ae0254b-rootfs\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854061 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-daemon-config\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854087 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-ovn\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854120 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-multus-conf-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854142 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-netns\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854163 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-kubelet\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854200 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-var-lib-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854218 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d36fce8a-ff27-48bf-be9c-67fc2046136d-system-cni-dir\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854266 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-config\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854302 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-systemd-units\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854325 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-systemd\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854355 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-system-cni-dir\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854378 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-bin\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854397 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-etc-openvswitch\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854415 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-node-log\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854500 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-cnibin\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854515 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d36fce8a-ff27-48bf-be9c-67fc2046136d-cni-binary-copy\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854587 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8dd02846-2628-4200-a7fe-886042bd15bb-tuning-conf-dir\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854597 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8dd02846-2628-4200-a7fe-886042bd15bb-cni-binary-copy\") pod 
\"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.854639 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-env-overrides\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.858493 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovn-node-metrics-cert\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.858577 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5e908152-dcb2-4b41-974d-26b03ae0254b-proxy-tls\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.859760 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.870693 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdxhd\" (UniqueName: \"kubernetes.io/projected/5e908152-dcb2-4b41-974d-26b03ae0254b-kube-api-access-qdxhd\") pod \"machine-config-daemon-vc6ss\" (UID: \"5e908152-dcb2-4b41-974d-26b03ae0254b\") " pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.870840 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8ckz\" (UniqueName: \"kubernetes.io/projected/d36fce8a-ff27-48bf-be9c-67fc2046136d-kube-api-access-w8ckz\") pod \"multus-hq6j2\" (UID: \"d36fce8a-ff27-48bf-be9c-67fc2046136d\") " pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.871668 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxqgg\" (UniqueName: \"kubernetes.io/projected/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-kube-api-access-vxqgg\") pod \"ovnkube-node-ljmfd\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.875093 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.875366 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsxbk\" (UniqueName: \"kubernetes.io/projected/8dd02846-2628-4200-a7fe-886042bd15bb-kube-api-access-gsxbk\") pod \"multus-additional-cni-plugins-wzw6f\" (UID: \"8dd02846-2628-4200-a7fe-886042bd15bb\") " pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.887833 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.902700 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.920060 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.928629 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.936125 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 
2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: W0930 16:59:39.939541 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e908152_dcb2_4b41_974d_26b03ae0254b.slice/crio-8234c605b3d0eeec58479a75de74cad6a66c5d137462112ceacc582bc7477e35 WatchSource:0}: Error finding container 8234c605b3d0eeec58479a75de74cad6a66c5d137462112ceacc582bc7477e35: Status 404 returned error can't find the container with id 8234c605b3d0eeec58479a75de74cad6a66c5d137462112ceacc582bc7477e35 Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.943615 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-hq6j2" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.948350 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.953826 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.954958 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:39 crc kubenswrapper[4818]: W0930 16:59:39.956207 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd36fce8a_ff27_48bf_be9c_67fc2046136d.slice/crio-07096b78185eb7fe59dbd5440f2ab668c841f5323f1a26d005f7542f788e35cc WatchSource:0}: Error finding container 07096b78185eb7fe59dbd5440f2ab668c841f5323f1a26d005f7542f788e35cc: Status 404 returned error can't find the container with id 07096b78185eb7fe59dbd5440f2ab668c841f5323f1a26d005f7542f788e35cc Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.968125 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: W0930 16:59:39.969288 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68c5c05a_d7b3_4e6d_b2a8_166bb36df9e6.slice/crio-e9300817e2b47f72209ac87ee8ed546e5d7f71915f34a2e48144bd2dc6ca4bb7 WatchSource:0}: Error finding container e9300817e2b47f72209ac87ee8ed546e5d7f71915f34a2e48144bd2dc6ca4bb7: Status 404 returned error can't find the container with id e9300817e2b47f72209ac87ee8ed546e5d7f71915f34a2e48144bd2dc6ca4bb7 Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.980533 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:39 crc kubenswrapper[4818]: I0930 16:59:39.996087 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:39Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.009158 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.020369 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.020514 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.022504 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.022602 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.027440 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.186747 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerStarted","Data":"ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.187216 4818 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerStarted","Data":"33c2984d76ea7373b84f68be1bacc2c5039cd1e509c5907bef0a992295a5e067"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.189343 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerStarted","Data":"dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.189379 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerStarted","Data":"07096b78185eb7fe59dbd5440f2ab668c841f5323f1a26d005f7542f788e35cc"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.191444 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vmncz" event={"ID":"85fe3d18-20dd-467f-be69-fcaa139126f9","Type":"ContainerStarted","Data":"549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.193841 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.193890 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"8234c605b3d0eeec58479a75de74cad6a66c5d137462112ceacc582bc7477e35"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.197868 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-gd5fd" event={"ID":"17ee0898-ae49-455e-b283-185058ad07b0","Type":"ContainerStarted","Data":"3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.197909 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-gd5fd" event={"ID":"17ee0898-ae49-455e-b283-185058ad07b0","Type":"ContainerStarted","Data":"e7cfdabda7be6d78e4890879897bf395d84bb60f1660d99036c2b4cfe452f8a2"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.200563 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e" exitCode=0 Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.200626 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.200657 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"e9300817e2b47f72209ac87ee8ed546e5d7f71915f34a2e48144bd2dc6ca4bb7"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.207320 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.221344 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.237466 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.251592 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.269852 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host
-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.282849 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.297993 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.308535 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.327514 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.342775 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.362913 4818 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.364729 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.364771 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.364786 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.364900 4818 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.368474 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.384147 4818 kubelet_node_status.go:115] "Node was previously registered" node="crc" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.384380 4818 kubelet_node_status.go:79] "Successfully registered node" node="crc" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.385470 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.385504 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.385514 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.385530 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.385541 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.411830 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.442822 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.458412 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.464165 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.464205 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.464218 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.464235 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.464252 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.473539 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.481419 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.485097 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.485133 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.485142 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.485162 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.485173 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.487597 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.497775 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.498471 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.501565 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.501618 4818 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.501628 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.501644 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.501656 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.514133 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"h
ost-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.515606 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.519348 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.519402 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.519414 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.519433 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.519797 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.528495 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.532331 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: E0930 16:59:40.532489 4818 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.534261 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.534291 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.534301 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.534317 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.534328 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.542059 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.555711 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.566815 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.586512 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.602373 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.617963 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.628156 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.636007 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.636056 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.636069 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.636088 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.636100 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.641528 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.655409 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.673239 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:40Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.739115 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.739163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.739177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.739196 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.739210 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.842683 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.842748 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.842763 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.842780 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.842793 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.945719 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.945766 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.945776 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.945793 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:40 crc kubenswrapper[4818]: I0930 16:59:40.945805 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:40Z","lastTransitionTime":"2025-09-30T16:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.019565 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.019732 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.048831 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.048873 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.048881 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.048896 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.048905 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.160747 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.161227 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.161246 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.161265 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.161278 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.205636 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.212031 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.212072 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.212081 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.212090 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.212097 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.212105 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.214100 4818 generic.go:334] "Generic (PLEG): container finished" podID="8dd02846-2628-4200-a7fe-886042bd15bb" containerID="ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444" exitCode=0
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.214523 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerDied","Data":"ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.219601 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.232429 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.250865 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.264540 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.264581 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.264593 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.264611 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.264621 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.266224 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.279790 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.304984 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.315560 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.333567 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.345809 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.356631 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.367424 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.367466 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.367476 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.367490 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.367500 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.370187 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.384368 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.398262 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.413226 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.439363 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.459541 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.471249 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.471296 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.471307 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.471325 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.471338 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.480461 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.502140 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.524552 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z 
is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.541408 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.564724 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.574465 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.574514 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.574527 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.574550 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.574565 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.581603 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.596765 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.612014 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.627407 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.642634 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.660427 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.677700 4818 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.677749 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.677761 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.677781 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.677795 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.678806 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:41Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.772213 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.772343 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.772371 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.772484 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 16:59:49.772442826 +0000 UTC m=+36.526714672 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.772496 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.772530 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.772581 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:49.772565909 +0000 UTC m=+36.526837755 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.772608 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:49.77259534 +0000 UTC m=+36.526867186 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.780677 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.780720 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.780730 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.780747 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.780759 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.873221 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.873282 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873431 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873454 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873467 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873466 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873515 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873523 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:49.873506996 +0000 UTC m=+36.627778812 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873536 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:41 crc kubenswrapper[4818]: E0930 16:59:41.873637 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:49.873609738 +0000 UTC m=+36.627881594 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.883897 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.883965 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.883975 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.883993 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.884003 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.986784 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.986858 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.986876 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.986905 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:41 crc kubenswrapper[4818]: I0930 16:59:41.986952 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:41Z","lastTransitionTime":"2025-09-30T16:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.020272 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.020355 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:42 crc kubenswrapper[4818]: E0930 16:59:42.020515 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:42 crc kubenswrapper[4818]: E0930 16:59:42.020634 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.089703 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.089760 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.089778 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.089798 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.089815 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.194015 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.194092 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.194108 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.194129 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.194145 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.221260 4818 generic.go:334] "Generic (PLEG): container finished" podID="8dd02846-2628-4200-a7fe-886042bd15bb" containerID="328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41" exitCode=0 Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.221356 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerDied","Data":"328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.244788 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.267996 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.295205 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-
30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.297518 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.297581 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.297599 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.297624 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.297649 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.315093 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.348001 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.366605 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.390543 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.401297 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.401335 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.401347 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.401366 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.401378 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.412430 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.439347 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.458761 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.475383 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.492362 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.503510 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.503546 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.503556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.503589 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.503599 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.509660 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.531258 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:42Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.606726 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.607001 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.607069 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.607184 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.607246 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.710724 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.710791 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.710810 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.710837 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.710865 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.814434 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.814498 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.814522 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.814553 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.814576 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.917770 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.917829 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.917847 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.917871 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:42 crc kubenswrapper[4818]: I0930 16:59:42.917887 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:42Z","lastTransitionTime":"2025-09-30T16:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.019306 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:43 crc kubenswrapper[4818]: E0930 16:59:43.019460 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.020813 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.020904 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.020974 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.020999 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.021015 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.123087 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.123144 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.123161 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.123182 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.123199 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.224962 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.224999 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.225008 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.225024 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.225036 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.230019 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.232468 4818 generic.go:334] "Generic (PLEG): container finished" podID="8dd02846-2628-4200-a7fe-886042bd15bb" containerID="271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40" exitCode=0 Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.232517 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerDied","Data":"271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.254711 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.266886 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.280994 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.292273 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.311317 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.323071 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.328218 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.328302 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.328320 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.328343 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.328359 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.336433 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.350909 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.368474 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.383097 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.397424 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.417936 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.431429 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.431479 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.431492 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.431511 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.431524 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.440298 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99
ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.452457 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:43Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.534179 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.534217 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.534225 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.534239 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.534265 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.637502 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.637545 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.637556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.637578 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.637591 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.739837 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.740213 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.740280 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.740320 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.740353 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.843705 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.843768 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.843785 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.843815 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.843833 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.957077 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.957628 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.957659 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.957681 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:43 crc kubenswrapper[4818]: I0930 16:59:43.957696 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:43Z","lastTransitionTime":"2025-09-30T16:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.019855 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.019881 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:44 crc kubenswrapper[4818]: E0930 16:59:44.020154 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:44 crc kubenswrapper[4818]: E0930 16:59:44.020227 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.038895 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.058456 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.061419 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.061495 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.061518 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.061549 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.061571 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.076906 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.096772 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.116469 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.133269 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.149699 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 
16:59:44.165627 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.165670 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.165682 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.165700 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.165715 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.175451 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z 
is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.193680 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.206835 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.221742 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.238963 4818 generic.go:334] "Generic (PLEG): container finished" podID="8dd02846-2628-4200-a7fe-886042bd15bb" containerID="83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f" exitCode=0 Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.239013 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerDied","Data":"83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.244685 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.257997 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.268401 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.268666 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.268884 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.269113 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.269247 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.277564 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.291466 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.317376 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.337201 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.351456 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.368119 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.372059 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.372089 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.372099 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.372121 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.372132 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.380058 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.398969 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.415509 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.430044 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.444489 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.458560 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.472530 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.475442 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.475476 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.475489 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.475506 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.475518 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.487221 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.509871 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\
"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/ho
st/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:44Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.578554 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.578602 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.578617 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.578642 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.578657 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.682329 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.682386 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.682402 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.682430 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.682448 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.785658 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.785714 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.785725 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.785747 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.785760 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.889307 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.889352 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.889366 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.889384 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.889396 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.992170 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.992204 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.992214 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.992230 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:44 crc kubenswrapper[4818]: I0930 16:59:44.992253 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:44Z","lastTransitionTime":"2025-09-30T16:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.019953 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:45 crc kubenswrapper[4818]: E0930 16:59:45.020159 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.094790 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.094880 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.094902 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.094962 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.094984 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.198828 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.198877 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.198895 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.198948 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.198970 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.252320 4818 generic.go:334] "Generic (PLEG): container finished" podID="8dd02846-2628-4200-a7fe-886042bd15bb" containerID="aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544" exitCode=0 Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.252405 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerDied","Data":"aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.270639 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.288707 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.303075 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.303131 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.303152 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.303179 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.303199 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.311429 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.326823 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.345324 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.361031 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.376616 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.391909 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.405500 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.406987 4818 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.407035 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.407048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.407068 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.407082 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.422410 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.437671 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.457226 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.468624 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.490455 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:45Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.510032 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.510095 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.510112 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.510137 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.510153 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.613607 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.613662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.613674 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.613690 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.613700 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.716283 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.716352 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.716376 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.716409 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.716432 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.819102 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.819150 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.819165 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.819181 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.819195 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.923717 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.923804 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.923830 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.923865 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:45 crc kubenswrapper[4818]: I0930 16:59:45.923893 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:45Z","lastTransitionTime":"2025-09-30T16:59:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.019343 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.019393 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:46 crc kubenswrapper[4818]: E0930 16:59:46.019512 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:46 crc kubenswrapper[4818]: E0930 16:59:46.019621 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.026982 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.027029 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.027040 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.027060 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.027071 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.129703 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.129773 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.129797 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.129826 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.129850 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.232961 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.233049 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.233073 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.233109 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.233133 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.264164 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.264405 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.264421 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.269018 4818 generic.go:334] "Generic (PLEG): container finished" podID="8dd02846-2628-4200-a7fe-886042bd15bb" containerID="c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee" exitCode=0 Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.269052 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerDied","Data":"c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.287008 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.303552 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.304880 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.305261 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.322324 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mount
Path\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all 
endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.335597 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.335638 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.335651 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.335671 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc 
kubenswrapper[4818]: I0930 16:59:46.335684 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.340591 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-man
ager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.355369 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.371247 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.388090 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.402499 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.416541 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.431563 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.438702 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.438740 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.438751 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.438768 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.438781 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.446898 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.462491 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aec
db82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.472689 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.493052 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\
\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.505215 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.517152 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.529874 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.540719 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.540748 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.540757 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.540772 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.540782 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.541220 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.555712 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.568971 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.584424 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1
d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.600842 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni
-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.614445 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.628340 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.643799 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.643882 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc 
kubenswrapper[4818]: I0930 16:59:46.643909 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.643987 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.644009 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.645852 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.662233 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.683069 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85
c7794b6aba2e858026734e45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.697434 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:46Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.746604 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.746641 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.746650 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.746662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.746672 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.849053 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.849111 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.849127 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.849151 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.849168 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.952550 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.952893 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.952907 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.952945 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:46 crc kubenswrapper[4818]: I0930 16:59:46.952957 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:46Z","lastTransitionTime":"2025-09-30T16:59:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.019565 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:47 crc kubenswrapper[4818]: E0930 16:59:47.019759 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.055608 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.055664 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.055676 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.055696 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.055710 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.158654 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.158727 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.158743 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.159270 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.159330 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.262870 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.262952 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.262965 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.262984 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.262998 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.277251 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.277494 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" event={"ID":"8dd02846-2628-4200-a7fe-886042bd15bb","Type":"ContainerStarted","Data":"31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.296538 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.316702 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.335878 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.351135 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.365428 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.365461 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.365470 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.365486 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.365495 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.374037 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85
c7794b6aba2e858026734e45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.387786 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.402331 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.414496 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.427792 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.439543 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.453635 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state
\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.467172 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.467205 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.467213 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.467231 4818 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.467241 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.470133 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.483692 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.498786 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\
\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:47Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.570752 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.570811 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.570830 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.570854 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.570872 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.674101 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.674258 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.674327 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.674371 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.674394 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.777882 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.777992 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.778018 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.778048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.778070 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.880858 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.880930 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.880943 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.880959 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.880971 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.983857 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.983905 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.983936 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.983955 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:47 crc kubenswrapper[4818]: I0930 16:59:47.983968 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:47Z","lastTransitionTime":"2025-09-30T16:59:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.019503 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.019548 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:48 crc kubenswrapper[4818]: E0930 16:59:48.019760 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:48 crc kubenswrapper[4818]: E0930 16:59:48.019817 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.087171 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.087219 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.087231 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.087250 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.087264 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.189627 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.189664 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.189672 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.189684 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.189694 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.280355 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.291959 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.292002 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.292012 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.292031 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.292043 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.394608 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.394684 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.394697 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.394717 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.394736 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.496825 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.496882 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.496900 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.496946 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.496961 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.599992 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.600069 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.600091 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.600122 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.600146 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.703535 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.703579 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.703587 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.703603 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.703614 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.806798 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.806855 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.806867 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.806941 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.806955 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.910198 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.910272 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.910290 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.910319 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:48 crc kubenswrapper[4818]: I0930 16:59:48.910337 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:48Z","lastTransitionTime":"2025-09-30T16:59:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.014319 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.014379 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.014395 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.014419 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.014438 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.019634 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.019822 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.117207 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.117279 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.117302 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.117329 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.117347 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.220857 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.220904 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.220914 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.220948 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.220960 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.286879 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/0.log" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.291828 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45" exitCode=1 Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.292087 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.293650 4818 scope.go:117] "RemoveContainer" containerID="5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.311529 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.323605 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.323672 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.323694 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.323721 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.323742 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.334014 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:48Z\\\",\\\"message\\\":\\\"qos/v1/apis/informers/externalversions/factory.go:140\\\\nI0930 16:59:48.770772 6107 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0930 16:59:48.770795 6107 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0930 16:59:48.770828 6107 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:48.770834 6107 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0930 16:59:48.770910 6107 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0930 16:59:48.770945 6107 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0930 16:59:48.770966 6107 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0930 16:59:48.770972 6107 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0930 16:59:48.771007 6107 factory.go:656] Stopping watch factory\\\\nI0930 16:59:48.771022 6107 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:48.771055 6107 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0930 16:59:48.771070 6107 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0930 16:59:48.771078 6107 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0930 16:59:48.771086 6107 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0930 16:59:48.771093 6107 handler.go:208] Removed *v1.Node event handler 
2\\\\nI09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.350252 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.372613 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.391030 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.405379 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.418012 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.426145 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.426182 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.426192 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.426210 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.426221 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.437723 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.450905 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.467641 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.501885 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.525226 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.531596 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.531641 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.531655 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.531673 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.531691 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.543819 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.565712 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:49Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.633830 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.633879 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc 
kubenswrapper[4818]: I0930 16:59:49.633889 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.633912 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.633941 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.736306 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.736340 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.736350 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.736367 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.736376 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.790294 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.790406 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.790432 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.790549 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.790595 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:05.790582819 +0000 UTC m=+52.544854625 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.790832 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:00:05.790821725 +0000 UTC m=+52.545093541 (durationBeforeRetry 16s). 
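
The "No retries permitted until … (durationBeforeRetry 16s)" entries above are kubelet's nestedpendingoperations applying exponential backoff to failed volume operations: each consecutive failure roughly doubles the wait before the next attempt, up to a cap, which is why both the secret mount and the CSI unmount are deferred by the same 16s. A minimal Go sketch of that capped-doubling pattern follows; the initial and maximum delays are illustrative, not kubelet's exact constants.

package main

import (
	"fmt"
	"time"
)

// backoff tracks retry state for one failing operation, in the spirit of
// kubelet's nestedpendingoperations: each failure doubles the delay up to a cap.
type backoff struct {
	delay     time.Duration // current durationBeforeRetry
	nextRetry time.Time     // no retries permitted until this instant
}

const (
	initialDelay = 500 * time.Millisecond // illustrative starting delay
	maxDelay     = 2 * time.Minute        // illustrative cap
)

// fail records another failure at time now and schedules the next attempt.
func (b *backoff) fail(now time.Time) {
	if b.delay == 0 {
		b.delay = initialDelay
	} else {
		b.delay *= 2
		if b.delay > maxDelay {
			b.delay = maxDelay
		}
	}
	b.nextRetry = now.Add(b.delay)
}

// allowed reports whether a retry may run at time now.
func (b *backoff) allowed(now time.Time) bool { return !now.Before(b.nextRetry) }

func main() {
	var b backoff
	now := time.Now()
	// Six consecutive failures yield 0.5s, 1s, 2s, 4s, 8s, 16s -- matching the
	// 16s durationBeforeRetry visible in the log after repeated failures.
	for i := 0; i < 6; i++ {
		b.fail(now)
		fmt.Printf("failure %d: retry in %v\n", i+1, b.delay)
		now = b.nextRetry
	}
}
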
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.790855 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.790982 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:05.790956498 +0000 UTC m=+52.545228314 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.839454 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.839513 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.839524 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.839545 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.839560 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.890885 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.890945 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891085 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891099 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891099 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891141 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891110 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891153 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891197 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:05.891183677 +0000 UTC m=+52.645455493 (durationBeforeRetry 16s). 
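
The kube-api-access-* volumes failing here are projected volumes: kubelet assembles each one from a bound service-account token plus the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps, so the mount cannot proceed until all three sources are registered in kubelet's object cache, which is exactly what the "not registered" errors report. A sketch of how such a volume is declared with the k8s.io/api types; the volume name is taken from the log, while the key names and the 3607-second token TTL are the conventional defaults and the ptr helper is local to the example.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	// A kube-api-access-style projected volume: token + two CA ConfigMaps.
	vol := corev1.Volume{
		Name: "kube-api-access-cqllr", // name as seen in the log
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						ExpirationSeconds: ptr(int64(3607)),
						Path:              "token",
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
					}},
				},
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}
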
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:49 crc kubenswrapper[4818]: E0930 16:59:49.891213 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:05.891206468 +0000 UTC m=+52.645478284 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.941597 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.941642 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.941658 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.941676 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:49 crc kubenswrapper[4818]: I0930 16:59:49.941688 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:49Z","lastTransitionTime":"2025-09-30T16:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.020451 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.020476 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.021033 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
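
The NetworkReady=false condition repeated throughout these entries comes from the container runtime: until the SDN pod (here ovn-kubernetes) writes a network config into /etc/kubernetes/cni/net.d/, the runtime finds no CNI configuration to load, and kubelet skips syncing any pod that needs a new sandbox rather than retrying each one individually. An illustrative recreation of that readiness probe, not cri-o's actual code: scan the conf directory for *.conf, *.conflist, or *.json files and report NotReady when none exist.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigured reports whether dir contains at least one CNI network
// configuration file, the condition the runtime checks before declaring
// NetworkReady=true. Illustrative only; the real logic also parses and
// validates the files it finds.
func cniConfigured(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigured("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		fmt.Println("NetworkReady=false: no CNI configuration file. Has your network provider started?")
		return
	}
	fmt.Println("NetworkReady=true")
}
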
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.021055 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.044576 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.044601 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.044609 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.044622 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.044631 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.147876 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.147915 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.147941 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.147956 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.147967 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.250951 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.251192 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.251335 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.251436 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.251513 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.298267 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/1.log" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.299872 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/0.log" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.303075 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e" exitCode=1 Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.303108 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.303153 4818 scope.go:117] "RemoveContainer" containerID="5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.304302 4818 scope.go:117] "RemoveContainer" containerID="0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.304556 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.324371 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.342508 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:48Z\\\",\\\"message\\\":\\\"qos/v1/apis/informers/externalversions/factory.go:140\\\\nI0930 16:59:48.770772 6107 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0930 16:59:48.770795 6107 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0930 16:59:48.770828 6107 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:48.770834 6107 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0930 16:59:48.770910 6107 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0930 16:59:48.770945 6107 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0930 16:59:48.770966 6107 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0930 16:59:48.770972 6107 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0930 16:59:48.771007 6107 factory.go:656] Stopping watch factory\\\\nI0930 16:59:48.771022 6107 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:48.771055 6107 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0930 16:59:48.771070 6107 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0930 16:59:48.771078 6107 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0930 16:59:48.771086 6107 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0930 16:59:48.771093 6107 handler.go:208] Removed *v1.Node event handler 2\\\\nI09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from 
k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.353846 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.353892 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.353904 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.353973 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.353988 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.364360 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.379208 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.394413 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.408629 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.422153 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.433388 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.446824 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.456958 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.456988 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.456999 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.457016 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.457029 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.465205 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.480033 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.498351 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.517143 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.550388 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.560259 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.560316 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.560336 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.560361 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.560380 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.664338 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.664382 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.664397 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.664421 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.664441 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.713387 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.737589 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc 
kubenswrapper[4818]: I0930 16:59:50.755016 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.767183 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.767246 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.767265 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.767290 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.767307 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.772666 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\
\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.792333 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.812081 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.822975 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.823026 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.823050 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.823079 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.823105 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.836334 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.844661 4818 kubelet_node_status.go:585] "Error updating node status, will retry" 
err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329b
a568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\
\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.849684 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.849781 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.849801 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.849826 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.849845 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.854534 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.867746 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.872486 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.872551 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.872569 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.872592 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.872609 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.884454 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:48Z\\\",\\\"message\\\":\\\"qos/v1/apis/informers/externalversions/factory.go:140\\\\nI0930 16:59:48.770772 6107 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0930 16:59:48.770795 6107 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0930 16:59:48.770828 6107 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:48.770834 6107 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0930 16:59:48.770910 6107 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0930 16:59:48.770945 6107 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0930 16:59:48.770966 6107 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0930 16:59:48.770972 6107 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0930 16:59:48.771007 6107 factory.go:656] Stopping watch factory\\\\nI0930 16:59:48.771022 6107 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:48.771055 6107 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0930 16:59:48.771070 6107 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0930 16:59:48.771078 6107 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0930 16:59:48.771086 6107 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0930 16:59:48.771093 6107 handler.go:208] Removed *v1.Node event handler 
2\\\\nI09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 
16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.893663 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.900250 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.900318 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.900338 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.900366 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.900387 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.908137 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.920149 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.925283 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.925351 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.925369 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.925394 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.925413 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.926970 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.946167 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: E0930 16:59:50.946410 4818 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.947196 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.948838 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.948887 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.948905 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.948957 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.948977 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:50Z","lastTransitionTime":"2025-09-30T16:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.965784 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:50 crc kubenswrapper[4818]: I0930 16:59:50.991114 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:50Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.007780 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:51Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.020249 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:51 crc kubenswrapper[4818]: E0930 16:59:51.020490 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.052346 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.052414 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.052445 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.052475 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.052494 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.157628 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.157715 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.157738 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.157767 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.157788 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.261528 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.261593 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.261609 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.261633 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.261655 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.309381 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/1.log" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.364095 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.364177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.364194 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.364250 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.364270 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.468067 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.468118 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.468140 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.468166 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.468246 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.571830 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.571902 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.571938 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.571963 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.571980 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.675022 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.675108 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.675134 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.675163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.675186 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.779466 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.779548 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.779566 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.779596 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.779615 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.882585 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.882661 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.882678 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.882710 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.882729 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.985876 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.986014 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.986078 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.986107 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:51 crc kubenswrapper[4818]: I0930 16:59:51.986127 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:51Z","lastTransitionTime":"2025-09-30T16:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.020368 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.020397 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:52 crc kubenswrapper[4818]: E0930 16:59:52.020598 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:52 crc kubenswrapper[4818]: E0930 16:59:52.021030 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.088866 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.088971 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.088996 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.089027 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.089050 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.192168 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.192253 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.192275 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.192307 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.192329 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.295608 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.295736 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.295756 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.295786 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.295804 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.398740 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.398792 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.398809 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.398831 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.398849 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.502100 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.502146 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.502163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.502188 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.502205 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.605798 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.605852 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.605864 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.605883 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.605896 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.708726 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.708787 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.708804 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.708833 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.708850 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.811006 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.811048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.811059 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.811077 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.811089 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.914375 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.914723 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.914856 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.915005 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:52 crc kubenswrapper[4818]: I0930 16:59:52.915116 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:52Z","lastTransitionTime":"2025-09-30T16:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.019146 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.019236 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.019262 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.019296 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.019324 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.019384 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:53 crc kubenswrapper[4818]: E0930 16:59:53.019517 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.028690 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t"] Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.029345 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.031793 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.032497 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.050654 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.083671 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:48Z\\\",\\\"message\\\":\\\"qos/v1/apis/informers/externalversions/factory.go:140\\\\nI0930 16:59:48.770772 6107 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0930 16:59:48.770795 6107 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0930 16:59:48.770828 6107 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:48.770834 6107 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0930 16:59:48.770910 6107 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0930 16:59:48.770945 6107 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0930 16:59:48.770966 6107 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0930 16:59:48.770972 6107 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0930 16:59:48.771007 6107 factory.go:656] Stopping watch factory\\\\nI0930 16:59:48.771022 6107 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:48.771055 6107 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0930 16:59:48.771070 6107 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0930 16:59:48.771078 6107 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0930 16:59:48.771086 6107 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0930 16:59:48.771093 6107 handler.go:208] Removed *v1.Node event handler 2\\\\nI09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 
reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.097606 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.117014 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.122178 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.122207 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.122215 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.122230 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.122240 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.135127 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.154253 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.169338 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.185719 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.203267 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.221177 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.224709 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fce7a982-d6f8-46fc-94e2-c029a2b439c5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.224785 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fce7a982-d6f8-46fc-94e2-c029a2b439c5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.224837 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fce7a982-d6f8-46fc-94e2-c029a2b439c5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.224873 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhbgl\" (UniqueName: \"kubernetes.io/projected/fce7a982-d6f8-46fc-94e2-c029a2b439c5-kube-api-access-jhbgl\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.225381 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.225417 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.225426 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.225442 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.225453 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.244791 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.261954 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.281183 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.311380 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.325309 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fce7a982-d6f8-46fc-94e2-c029a2b439c5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.325363 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhbgl\" (UniqueName: \"kubernetes.io/projected/fce7a982-d6f8-46fc-94e2-c029a2b439c5-kube-api-access-jhbgl\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.325427 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fce7a982-d6f8-46fc-94e2-c029a2b439c5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.325455 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fce7a982-d6f8-46fc-94e2-c029a2b439c5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.326236 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fce7a982-d6f8-46fc-94e2-c029a2b439c5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.327663 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.327712 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.327721 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.327740 4818 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeNotReady" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.327753 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.328668 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fce7a982-d6f8-46fc-94e2-c029a2b439c5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.329428 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:53Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.334023 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fce7a982-d6f8-46fc-94e2-c029a2b439c5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.351756 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhbgl\" (UniqueName: \"kubernetes.io/projected/fce7a982-d6f8-46fc-94e2-c029a2b439c5-kube-api-access-jhbgl\") pod \"ovnkube-control-plane-749d76644c-j579t\" (UID: \"fce7a982-d6f8-46fc-94e2-c029a2b439c5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.351987 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.431103 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.431166 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.431183 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.431208 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.431226 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.534239 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.534288 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.534305 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.534328 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.534345 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.637254 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.637323 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.637337 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.637355 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.637367 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.739765 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.739809 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.739819 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.739836 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.739850 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.842956 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.842986 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.842999 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.843015 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.843026 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.945117 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.945156 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.945164 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.945177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:53 crc kubenswrapper[4818]: I0930 16:59:53.945186 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:53Z","lastTransitionTime":"2025-09-30T16:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.020161 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.020243 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 16:59:54 crc kubenswrapper[4818]: E0930 16:59:54.020380 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 16:59:54 crc kubenswrapper[4818]: E0930 16:59:54.020579 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.037234 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.048335 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.048363 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.048371 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.048385 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.048395 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.052577 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.075382 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.097408 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.122235 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.146038 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.151580 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.151653 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.151680 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.151715 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.151739 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.167282 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.183315 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.204273 4818 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.219994 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.236127 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.256321 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.256613 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.256656 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.256676 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.256695 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.256710 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.274256 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.287391 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.308637 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:48Z\\\",\\\"message\\\":\\\"qos/v1/apis/informers/externalversions/factory.go:140\\\\nI0930 16:59:48.770772 6107 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0930 16:59:48.770795 6107 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0930 16:59:48.770828 6107 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:48.770834 6107 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0930 16:59:48.770910 6107 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0930 16:59:48.770945 6107 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0930 16:59:48.770966 6107 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0930 16:59:48.770972 6107 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0930 16:59:48.771007 6107 factory.go:656] Stopping watch factory\\\\nI0930 16:59:48.771022 6107 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:48.771055 6107 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0930 16:59:48.771070 6107 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0930 16:59:48.771078 6107 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0930 16:59:48.771086 6107 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0930 16:59:48.771093 6107 handler.go:208] Removed *v1.Node event handler 2\\\\nI09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 
reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.329339 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" event={"ID":"fce7a982-d6f8-46fc-94e2-c029a2b439c5","Type":"ContainerStarted","Data":"1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.329583 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" event={"ID":"fce7a982-d6f8-46fc-94e2-c029a2b439c5","Type":"ContainerStarted","Data":"bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.329656 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" 
event={"ID":"fce7a982-d6f8-46fc-94e2-c029a2b439c5","Type":"ContainerStarted","Data":"b192bd3c8fab28d034a27ac0424edf25d570f1a8a40145dd6bf622f646644631"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.346986 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.359074 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.359097 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.359104 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.359314 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.359324 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.362170 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.381978 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.398648 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 
16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.410279 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.436828 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:48Z\\\",\\\"message\\\":\\\"qos/v1/apis/informers/externalversions/factory.go:140\\\\nI0930 16:59:48.770772 6107 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0930 16:59:48.770795 6107 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0930 16:59:48.770828 6107 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:48.770834 6107 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0930 16:59:48.770910 6107 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0930 16:59:48.770945 6107 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0930 16:59:48.770966 6107 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0930 16:59:48.770972 6107 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0930 16:59:48.771007 6107 factory.go:656] Stopping watch factory\\\\nI0930 16:59:48.771022 6107 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:48.771055 6107 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0930 16:59:48.771070 6107 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0930 16:59:48.771078 6107 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0930 16:59:48.771086 6107 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0930 16:59:48.771093 6107 handler.go:208] Removed *v1.Node event handler 2\\\\nI09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 
reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.449190 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.462022 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.462088 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.462111 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.462145 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.462170 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.472739 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.488006 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.510350 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.526981 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.533624 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-4p4hg"] Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.536219 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:54 crc kubenswrapper[4818]: E0930 16:59:54.537356 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.539642 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvqst\" (UniqueName: \"kubernetes.io/projected/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-kube-api-access-bvqst\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.539718 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.541982 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.560116 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.564673 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.564866 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.565074 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.565223 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.565348 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.573446 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.589382 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.606916 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.624670 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.640574 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvqst\" (UniqueName: \"kubernetes.io/projected/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-kube-api-access-bvqst\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.640703 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:54 crc kubenswrapper[4818]: E0930 16:59:54.640999 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 16:59:54 crc kubenswrapper[4818]: E0930 16:59:54.641157 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:55.141120574 +0000 UTC m=+41.895392421 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.642423 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"n
ame\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.657103 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.665521 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvqst\" (UniqueName: \"kubernetes.io/projected/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-kube-api-access-bvqst\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.668162 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.668198 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.668211 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.668238 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.668253 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.676610 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.697552 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.718739 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 
16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.733846 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.757118 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5eabfebb1b400bed53b275a9853c4c3491ab5f85c7794b6aba2e858026734e45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:48Z\\\",\\\"message\\\":\\\"qos/v1/apis/informers/externalversions/factory.go:140\\\\nI0930 16:59:48.770772 6107 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0930 16:59:48.770795 6107 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0930 16:59:48.770828 6107 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:48.770834 6107 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0930 16:59:48.770910 6107 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0930 16:59:48.770945 6107 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0930 16:59:48.770966 6107 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0930 16:59:48.770972 6107 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0930 16:59:48.771007 6107 factory.go:656] Stopping watch factory\\\\nI0930 16:59:48.771022 6107 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:48.771055 6107 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0930 16:59:48.771070 6107 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0930 16:59:48.771078 6107 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0930 16:59:48.771086 6107 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0930 16:59:48.771093 6107 handler.go:208] Removed *v1.Node event handler 2\\\\nI09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 
reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.771062 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.771144 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.771158 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.771183 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.771198 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.775234 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.797776 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.817915 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.835054 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.851321 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.868338 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.877988 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.878040 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.878081 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.878106 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.878125 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.889002 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T16:59:54Z is after 2025-08-24T17:21:41Z" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.982166 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.982220 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.982237 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.982261 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:54 crc kubenswrapper[4818]: I0930 16:59:54.982281 4818 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:54Z","lastTransitionTime":"2025-09-30T16:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.020166 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:55 crc kubenswrapper[4818]: E0930 16:59:55.020355 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.086211 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.086274 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.086290 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.086314 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.086330 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.146536 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:55 crc kubenswrapper[4818]: E0930 16:59:55.146731 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 16:59:55 crc kubenswrapper[4818]: E0930 16:59:55.146817 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:56.146793282 +0000 UTC m=+42.901065128 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.190351 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.190450 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.190468 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.190494 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.190512 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.293611 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.293677 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.293695 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.293721 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.293746 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.396912 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.397065 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.397090 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.397122 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.397144 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.500533 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.500579 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.500590 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.500607 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.500618 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.603607 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.603650 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.603662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.603695 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.603706 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.707360 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.707424 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.707446 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.707479 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.707502 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.810373 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.810419 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.810428 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.810445 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.810456 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.913513 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.913564 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.913577 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.913594 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:55 crc kubenswrapper[4818]: I0930 16:59:55.913607 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:55Z","lastTransitionTime":"2025-09-30T16:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.017048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.017121 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.017147 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.017175 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.017197 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.020310 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.020360 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.020404 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:56 crc kubenswrapper[4818]: E0930 16:59:56.020477 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 16:59:56 crc kubenswrapper[4818]: E0930 16:59:56.020657 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 16:59:56 crc kubenswrapper[4818]: E0930 16:59:56.020838 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.120654 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.120709 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.120718 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.120734 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.120744 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.156062 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 16:59:56 crc kubenswrapper[4818]: E0930 16:59:56.156231 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 16:59:56 crc kubenswrapper[4818]: E0930 16:59:56.156307 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 16:59:58.156286162 +0000 UTC m=+44.910557978 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.223978 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.224047 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.224067 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.224095 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.224113 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.327387 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.327458 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.327480 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.327510 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.327527 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.430153 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.430237 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.430263 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.430293 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.430317 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.533233 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.533296 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.533313 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.533336 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.533357 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.636024 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.636080 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.636102 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.636126 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.636145 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.739634 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.739699 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.739717 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.739741 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.739758 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.842493 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.842552 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.842570 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.842606 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.842642 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.946568 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.946625 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.946641 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.946664 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:56 crc kubenswrapper[4818]: I0930 16:59:56.946681 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:56Z","lastTransitionTime":"2025-09-30T16:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.019713 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 16:59:57 crc kubenswrapper[4818]: E0930 16:59:57.019970 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.050289 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.050352 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.050366 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.050389 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.050408 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.153989 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.154068 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.154090 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.154122 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.154146 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.257805 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.257899 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.257963 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.258003 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.258028 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.361380 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.361425 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.361437 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.361454 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.361466 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.464600 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.464669 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.464693 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.464724 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.464748 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.568742 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.568805 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.568827 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.568851 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.568869 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.672520 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.672591 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.672605 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.672629 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.672646 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.775726 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.775791 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.775806 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.775825 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.775838 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.878850 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.878901 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.878912 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.878965 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.878977 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.983284 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.983345 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.983361 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.983385 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:57 crc kubenswrapper[4818]: I0930 16:59:57.983402 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:57Z","lastTransitionTime":"2025-09-30T16:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.020033 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.020106 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.020035 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 16:59:58 crc kubenswrapper[4818]: E0930 16:59:58.020298 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 16:59:58 crc kubenswrapper[4818]: E0930 16:59:58.020475 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 16:59:58 crc kubenswrapper[4818]: E0930 16:59:58.020656 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.086705 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.086743 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.086755 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.086772 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.086784 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.178810 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 16:59:58 crc kubenswrapper[4818]: E0930 16:59:58.178984 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Sep 30 16:59:58 crc kubenswrapper[4818]: E0930 16:59:58.179137 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:02.179117806 +0000 UTC m=+48.933389622 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.189461 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.189518 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.189537 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.189560 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.189578 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.292534 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.292593 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.292610 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.292639 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.292657 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.396190 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.396251 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.396271 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.396296 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.396315 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.499182 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.499247 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.499269 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.499297 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.499318 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.602503 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.602568 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.602586 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.602612 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.602635 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.705424 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.705505 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.705531 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.705556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.705576 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.808420 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.808490 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.808507 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.808530 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.808549 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.911259 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.911321 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.911338 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.911364 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:58 crc kubenswrapper[4818]: I0930 16:59:58.911398 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:58Z","lastTransitionTime":"2025-09-30T16:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.015162 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.015239 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.015261 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.015292 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.015316 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.019483 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 16:59:59 crc kubenswrapper[4818]: E0930 16:59:59.019655 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.118896 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.119038 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.119060 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.119090 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.119112 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.222536 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.222590 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.222607 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.222631 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.222648 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.325260 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.325331 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.325349 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.325375 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.325397 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.429089 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.429156 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.429173 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.429202 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.429220 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.532226 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.532277 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.532290 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.532313 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.532328 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.634894 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.635060 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.635088 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.635123 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.635145 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.738614 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.738673 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.738690 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.738708 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.738723 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.842465 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.842531 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.842548 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.842575 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.842594 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.946265 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.946346 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.946381 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.946415 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 16:59:59 crc kubenswrapper[4818]: I0930 16:59:59.946436 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T16:59:59Z","lastTransitionTime":"2025-09-30T16:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.020072 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.020145 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:00 crc kubenswrapper[4818]: E0930 17:00:00.020230 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.020159 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:00 crc kubenswrapper[4818]: E0930 17:00:00.020336 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:00 crc kubenswrapper[4818]: E0930 17:00:00.020446 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.049002 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.049080 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.049101 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.049125 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.049143 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.151407 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.151480 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.151498 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.151528 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.151547 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.254728 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.254802 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.254827 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.254856 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.254875 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.367468 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.367523 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.367544 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.367572 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.367593 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.470725 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.470780 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.470791 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.470810 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.470822 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.573251 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.573292 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.573300 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.573316 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.573325 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.676326 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.676380 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.676392 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.676411 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.676426 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.779472 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.779561 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.779585 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.779616 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.779640 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.883741 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.883804 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.883821 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.883846 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.883864 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.987578 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.988182 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.988248 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.988264 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.988289 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.988308 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.989341 4818 scope.go:117] "RemoveContainer" containerID="0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.990043 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.990112 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.990130 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.990154 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:00 crc kubenswrapper[4818]: I0930 17:00:00.990432 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:00Z","lastTransitionTime":"2025-09-30T17:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.014019 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.020689 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:01 crc kubenswrapper[4818]: E0930 17:00:01.021092 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:01 crc kubenswrapper[4818]: E0930 17:00:01.021167 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.034673 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.034743 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.034763 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.034791 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.035175 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.057727 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd790
2697bb841a3ed04f1607607e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: E0930 17:00:01.059829 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 
2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.066525 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.066613 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.066633 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.067305 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.067397 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.073389 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: E0930 17:00:01.089726 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"cru
n\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.093518 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.094684 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.094734 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.094747 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.094766 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.094779 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.108616 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: E0930 17:00:01.117540 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient 
memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\
\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\
":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.122434 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.122497 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.122515 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.122542 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.122561 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.128742 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: E0930 17:00:01.139386 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: E0930 17:00:01.139618 4818 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.142808 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.142860 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.142872 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.142892 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.142906 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.143037 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.157459 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.170360 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.183600 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.198392 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.213232 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.232525 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.245264 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.245305 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.245320 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.245342 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.245358 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.255761 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.278700 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.290424 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 
17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.350037 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.350106 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.350129 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.350163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.350188 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.362373 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/1.log"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.366613 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0"}
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.367263 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd"
Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.396960 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb
49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 
16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.409648 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.422729 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.441349 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.454709 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.454782 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.454817 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.454843 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.454857 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.457818 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.475549 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.492556 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.521858 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d42685
00f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.554723 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.557334 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.557377 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.557392 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.557411 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.557444 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.576467 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.602886 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.618332 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.637229 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.650986 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.659556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.659633 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.659650 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.659675 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.659689 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.663815 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.678313 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:01Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.762520 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.762573 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.762586 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.762605 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.762620 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.865767 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.865821 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.865831 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.865848 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.865859 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.968050 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.968088 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.968098 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.968111 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:01 crc kubenswrapper[4818]: I0930 17:00:01.968121 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:01Z","lastTransitionTime":"2025-09-30T17:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.023338 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:02 crc kubenswrapper[4818]: E0930 17:00:02.023492 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.023746 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:02 crc kubenswrapper[4818]: E0930 17:00:02.023825 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.024291 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:02 crc kubenswrapper[4818]: E0930 17:00:02.024393 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.070905 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.071229 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.071309 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.071396 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.071505 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.174665 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.174727 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.174744 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.174770 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.174787 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.233740 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:02 crc kubenswrapper[4818]: E0930 17:00:02.234057 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 17:00:02 crc kubenswrapper[4818]: E0930 17:00:02.234193 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:10.234159961 +0000 UTC m=+56.988431817 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.278160 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.278984 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.279139 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.279242 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.279327 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.374146 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/2.log" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.375244 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/1.log" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381388 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0" exitCode=1 Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381445 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381496 4818 scope.go:117] "RemoveContainer" containerID="0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381594 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381608 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381629 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.381643 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.382654 4818 scope.go:117] "RemoveContainer" containerID="e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0" Sep 30 17:00:02 crc kubenswrapper[4818]: E0930 17:00:02.382916 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.408217 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/va
r/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.433093 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.451497 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c2
8fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.470177 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6
f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.485182 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.485524 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.485683 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.485915 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.486222 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.487406 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.507289 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.533215 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.551714 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.568888 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.589708 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.590229 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.590684 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.591109 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.591333 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.590277 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 
16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.618317 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.637344 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.657075 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.669881 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.689915 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d42685
00f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.695203 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.695272 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.695292 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.695320 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.695340 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.704383 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.798755 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.798798 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.798809 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.798824 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.798835 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.901665 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.901737 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.901755 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.901781 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:02 crc kubenswrapper[4818]: I0930 17:00:02.901799 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:02Z","lastTransitionTime":"2025-09-30T17:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.005315 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.005388 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.005398 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.005421 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.005434 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.019973 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:03 crc kubenswrapper[4818]: E0930 17:00:03.020114 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.059751 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.069479 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.080464 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.102982 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e234d51056eb6bac1715d7403677e3530ddd7902697bb841a3ed04f1607607e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188090 6268 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0930 16:59:50.188238 6268 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188287 6268 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188418 6268 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188628 6268 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188772 6268 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.188844 6268 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0930 16:59:50.189205 6268 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0930 16:59:50.189260 6268 factory.go:656] Stopping watch factory\\\\nI0930 16:59:50.189288 6268 ovnkube.go:599] Stopped ovnkube\\\\nI0930 16:59:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", 
ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b
4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.108255 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.108332 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.108349 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.108377 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 
17:00:03.108397 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.121347 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.143600 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.167267 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.187660 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.204274 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.211005 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.211072 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.211086 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.211108 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.211122 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.222084 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.242027 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.257232 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.274319 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.289023 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.305127 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.313335 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.313390 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.313408 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.313432 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.313450 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.324840 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.339852 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.356311 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.387704 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/2.log" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.392331 4818 scope.go:117] "RemoveContainer" containerID="e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0" Sep 30 17:00:03 crc kubenswrapper[4818]: E0930 17:00:03.392473 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.415838 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 
crc kubenswrapper[4818]: I0930 17:00:03.416256 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.416386 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.416523 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.416642 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.422671 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb
49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.438730 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.462250 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.478419 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.493381 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.508294 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.518710 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.518752 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.518762 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.518778 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.518792 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.520874 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.537491 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.555294 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.572702 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1
d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.587043 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni
-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.600390 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.620556 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.621815 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.621870 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.621888 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.621909 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.621943 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.642597 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.655146 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.672404 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.690520 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:03Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.725401 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.725442 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.725454 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.725473 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.725490 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.828480 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.829017 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.829213 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.829426 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.829610 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.932717 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.933160 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.933317 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.933517 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:03 crc kubenswrapper[4818]: I0930 17:00:03.933711 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:03Z","lastTransitionTime":"2025-09-30T17:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.019629 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.019717 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:04 crc kubenswrapper[4818]: E0930 17:00:04.020406 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.019722 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:04 crc kubenswrapper[4818]: E0930 17:00:04.020649 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:04 crc kubenswrapper[4818]: E0930 17:00:04.020270 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.037434 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.037541 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.037560 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.037585 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.037602 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.037673 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.070032 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.088245 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.108694 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.132284 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.140670 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.140719 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.140737 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.140764 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.140783 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.156164 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.178681 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.196613 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.219984 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.241982 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.244493 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.244525 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.244537 4818 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.244553 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.244566 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.261916 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.284678 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",
\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.305383 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.328493 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.346688 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.346984 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.347113 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.347206 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.347403 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.347649 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.360534 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.379130 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:04Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.450584 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.450645 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.450662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.450686 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.450705 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.553036 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.553095 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.553108 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.553128 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.553140 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.656753 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.656816 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.656833 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.656858 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.656875 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.760272 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.760353 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.760369 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.760426 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.760441 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.863905 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.863988 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.864006 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.864030 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.864046 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.967712 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.967756 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.967770 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.967790 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:04 crc kubenswrapper[4818]: I0930 17:00:04.967802 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:04Z","lastTransitionTime":"2025-09-30T17:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.019359 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.019577 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.070653 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.070736 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.070754 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.070785 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.070807 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.173868 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.173950 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.173968 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.173991 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.174013 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.276542 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.276619 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.276645 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.276677 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.276698 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.380814 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.380885 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.380912 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.380981 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.381004 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.485028 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.485101 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.485124 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.485154 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.485176 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.588658 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.589165 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.589400 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.589614 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.589815 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.694132 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.694215 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.694239 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.694274 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.694296 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.798215 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.798305 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.798323 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.798355 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.798380 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.879525 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.879737 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:00:37.879697879 +0000 UTC m=+84.633969725 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.879815 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.879963 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.879977 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.880062 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:37.880039277 +0000 UTC m=+84.634311093 (durationBeforeRetry 32s). 
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.879815 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.879963 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.879977 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.880062 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:37.880039277 +0000 UTC m=+84.634311093 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.880186 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.880262 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:37.880247682 +0000 UTC m=+84.634519528 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
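The "not registered" errors are not API-server lookups failing. The kubelet's configmap and secret managers serve objects from a local cache that only tracks objects referenced by pods already registered with the manager; a lookup before that registration is rejected outright. A toy model of that guard, assuming nothing beyond what the messages show:

// Toy illustration (not kubelet source) of the "object not registered"
// behavior seen in configmap.go:193 and secret.go:188 above.
package main

import "fmt"

type key struct{ namespace, name string }

type objectCache struct {
	registered map[key]string // value stands in for the cached object payload
}

func (c *objectCache) Get(namespace, name string) (string, error) {
	v, ok := c.registered[key{namespace, name}]
	if !ok {
		// No pod referencing this object has been registered yet.
		return "", fmt.Errorf("object %q/%q not registered", namespace, name)
	}
	return v, nil
}

func main() {
	c := &objectCache{registered: map[key]string{}}
	if _, err := c.Get("openshift-network-console", "networking-console-plugin"); err != nil {
		fmt.Println("Couldn't get configMap:", err)
	}
}

The condition clears on its own once the pod manager registers the pod and the cache begins tracking its namespace objects; the parked MountVolume operations then succeed on retry.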
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.906050 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.906104 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.906114 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.906131 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.906141 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:05Z","lastTransitionTime":"2025-09-30T17:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.980687 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:05 crc kubenswrapper[4818]: I0930 17:00:05.980760 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981025 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981060 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981081 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981074 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981131 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981160 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981163 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:37.981140958 +0000 UTC m=+84.735412804 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
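The recurring durationBeforeRetry 32s comes from exponential backoff on repeated failures of the same volume operation: the wait starts around 500ms and doubles per failure up to a cap (2m2s in upstream kubelet's exponential backoff; treat the exact constants here as assumptions). A minimal sketch of that schedule:

// Back-of-envelope reproduction of the retry schedule behind
// "No retries permitted until ... (durationBeforeRetry 32s)".
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond              // assumed initial delay
	maxDelay := 2*time.Minute + 2*time.Second    // assumed cap
	for attempt := 1; attempt <= 9; attempt++ {
		fmt.Printf("failure %d -> durationBeforeRetry %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

Under these assumed constants, 32s is the seventh consecutive failure of the same operation, consistent with a kubelet that has been retrying these mounts since it started about 84 seconds earlier (the m=+84 monotonic offset in the records).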
Sep 30 17:00:05 crc kubenswrapper[4818]: E0930 17:00:05.981321 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:37.981284341 +0000 UTC m=+84.735556197 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.008976 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.009030 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.009049 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.009073 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.009091 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:06Z","lastTransitionTime":"2025-09-30T17:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.019495 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.019541 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.019629 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:06 crc kubenswrapper[4818]: E0930 17:00:06.019835 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
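For context on kube-api-access-s2dwl and kube-api-access-cqllr: a kube-api-access-* volume is a projected volume combining the bound serviceaccount token, the kube-root-ca.crt configmap, the pod namespace via the downward API, and, on OpenShift, the injected openshift-service-ca.crt configmap as well, which is why exactly those two configmaps appear in the projected.go errors. A hedged reconstruction of that spec in Go types (field values are illustrative, not the exact generated object):

// Sketch of the shape of a kube-api-access-* projected volume.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiry := int64(3607) // illustrative token lifetime
	vol := corev1.Volume{
		Name: "kube-api-access-s2dwl",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token", ExpirationSeconds: &expiry}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					{ConfigMap: &corev1.ConfigMapProjection{ // OpenShift-specific source
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
					}},
					{DownwardAPI: &corev1.DownwardAPIProjection{
						Items: []corev1.DownwardAPIVolumeFile{{
							Path:     "namespace",
							FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
						}},
					}},
				},
			},
		},
	}
	fmt.Println("projected volume:", vol.Name)
}

Because every source must resolve before the volume can be materialized, a single unregistered configmap fails the whole kube-api-access mount.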
pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:06 crc kubenswrapper[4818]: E0930 17:00:06.020076 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:06 crc kubenswrapper[4818]: E0930 17:00:06.020212 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.113271 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.113322 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.113339 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.113371 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.113402 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:06Z","lastTransitionTime":"2025-09-30T17:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.216744 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.216792 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.216805 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.216823 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.216835 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:06Z","lastTransitionTime":"2025-09-30T17:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.942769 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.942855 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.942879 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.942910 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:06 crc kubenswrapper[4818]: I0930 17:00:06.942974 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:06Z","lastTransitionTime":"2025-09-30T17:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:07 crc kubenswrapper[4818]: I0930 17:00:07.020206 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:07 crc kubenswrapper[4818]: E0930 17:00:07.020470 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
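The Recording event / Node became not ready pairs repeat on every status sync, roughly every 100ms here, until the runtime reports NetworkReady=true. A small client-go sketch that observes the same Ready condition from outside the node (node name from the log, kubeconfig path assumed):

// Poll the node's Ready condition; while the CNI config is missing this
// prints Ready=False with reason KubeletNotReady, matching setters.go:603.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	for {
		node, err := cs.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
		if err != nil {
			panic(err)
		}
		for _, c := range node.Status.Conditions {
			if c.Type == corev1.NodeReady {
				fmt.Printf("%s Ready=%s reason=%s\n",
					time.Now().Format(time.RFC3339), c.Status, c.Reason)
			}
		}
		time.Sleep(2 * time.Second)
	}
}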
Sep 30 17:00:08 crc kubenswrapper[4818]: I0930 17:00:08.020269 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:08 crc kubenswrapper[4818]: I0930 17:00:08.020296 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:08 crc kubenswrapper[4818]: E0930 17:00:08.020471 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:08 crc kubenswrapper[4818]: I0930 17:00:08.020484 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:08 crc kubenswrapper[4818]: E0930 17:00:08.020577 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:08 crc kubenswrapper[4818]: E0930 17:00:08.020913 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
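The No sandbox for pod can be found lines and the Error syncing pod, skipping lines are two halves of one decision: a pod without a sandbox needs one created, but sync for non-host-network pods is refused while the network plugin is not ready. Host-network pods skip that check, which is how the network provider itself can come up. A toy model of the branch, not kubelet source:

// Toy reproduction of the sandbox decision visible in util.go:30 and
// pod_workers.go:1301 above.
package main

import "fmt"

type pod struct {
	name        string
	hostNetwork bool
	hasSandbox  bool
}

func syncPod(p pod, networkReady bool) error {
	if !p.hasSandbox {
		fmt.Printf("No sandbox for pod %q can be found. Need to start a new one\n", p.name)
		if !p.hostNetwork && !networkReady {
			// Non-host-network pods cannot get a sandbox until CNI is up.
			return fmt.Errorf("network is not ready: container runtime network not ready: NetworkReady=false")
		}
		// ... a real kubelet would call the CRI RunPodSandbox here ...
	}
	return nil
}

func main() {
	p := pod{name: "openshift-multus/network-metrics-daemon-4p4hg"}
	if err := syncPod(p, false); err != nil {
		fmt.Println("Error syncing pod, skipping:", err)
	}
}

This is why the same three non-host-network pods (network-metrics-daemon, network-check-target, networking-console-plugin) are re-queued and skipped on every sync interval in this stretch of the log.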
Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.020056 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:09 crc kubenswrapper[4818]: E0930 17:00:09.020218 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.118055 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.118383 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.118558 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.118707 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.118844 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:09Z","lastTransitionTime":"2025-09-30T17:00:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.946475 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.946520 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.946531 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.946548 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:09 crc kubenswrapper[4818]: I0930 17:00:09.946559 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:09Z","lastTransitionTime":"2025-09-30T17:00:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.020502 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:10 crc kubenswrapper[4818]: E0930 17:00:10.020730 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.020860 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.020990 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:10 crc kubenswrapper[4818]: E0930 17:00:10.021138 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:10 crc kubenswrapper[4818]: E0930 17:00:10.021251 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.049957 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.050048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.050065 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.050083 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.050097 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.153122 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.153177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.153199 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.153232 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.153254 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.256639 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.256763 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.256782 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.256806 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.256822 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.333072 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:10 crc kubenswrapper[4818]: E0930 17:00:10.333256 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 17:00:10 crc kubenswrapper[4818]: E0930 17:00:10.333585 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:26.33354713 +0000 UTC m=+73.087818977 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.360990 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.361410 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.361437 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.361465 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.361486 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.464397 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.464460 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.464478 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.464506 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.464523 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.567856 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.567908 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.567957 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.567990 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.568012 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.671135 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.671207 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.671230 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.671260 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.671284 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.773978 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.774015 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.774026 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.774041 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.774052 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.877284 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.877520 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.877685 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.877761 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.877823 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.980715 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.980841 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.980869 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.980898 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:10 crc kubenswrapper[4818]: I0930 17:00:10.980999 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:10Z","lastTransitionTime":"2025-09-30T17:00:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.020289 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:11 crc kubenswrapper[4818]: E0930 17:00:11.020480 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.084434 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.084499 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.084516 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.084544 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.084562 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.159400 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.159478 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.159506 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.159540 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.159565 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: E0930 17:00:11.177915 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:11Z is after 
2025-08-24T17:21:41Z" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.183213 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.183272 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.183290 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.183315 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.183332 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: E0930 17:00:11.204508 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:11Z is after 
2025-08-24T17:21:41Z" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.209983 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.210016 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.210027 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.210044 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.210056 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: E0930 17:00:11.229223 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:11Z is after 
2025-08-24T17:21:41Z" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.233841 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.233878 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.233887 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.233902 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.233913 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: E0930 17:00:11.254339 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:11Z is after 
2025-08-24T17:21:41Z" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.260752 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.260817 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.260841 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.260868 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.260888 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: E0930 17:00:11.280833 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:11Z is after 
2025-08-24T17:21:41Z" Sep 30 17:00:11 crc kubenswrapper[4818]: E0930 17:00:11.281063 4818 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283442 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283523 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283542 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283569 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283587 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386003 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386073 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386098 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386129 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386152 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283442 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283523 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283542 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283569 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.283587 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386003 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386073 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386098 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386129 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.386152 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.489156 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.489197 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.489209 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.489227 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.489241 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.592233 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.592286 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.592378 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.592405 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.592503 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.695388 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.695447 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.695464 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.695489 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.695506 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.798412 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.798459 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.798470 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.798490 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.798501 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.901277 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.901348 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.901366 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.901404 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:11 crc kubenswrapper[4818]: I0930 17:00:11.901428 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:11Z","lastTransitionTime":"2025-09-30T17:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.005305 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.005359 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.005376 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.005405 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.005423 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.019761 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.019864 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:12 crc kubenswrapper[4818]: E0930 17:00:12.019999 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:12 crc kubenswrapper[4818]: E0930 17:00:12.020151 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.020305 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:12 crc kubenswrapper[4818]: E0930 17:00:12.020413 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
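Every NotReady heartbeat in this stretch repeats one cause: no CNI conflist under /etc/kubernetes/cni/net.d/, so the kubelet reports NetworkPluginNotReady and the multus, console-plugin, and diagnostics pods above cannot get sandboxes. A small sketch that polls the directory named in the log until the network provider writes its configuration; the path is quoted from the messages above, and the five-second interval is an arbitrary choice:

    import os
    import time

    # Directory the kubelet checks for a CNI network configuration, per the
    # NetworkPluginNotReady messages in this log.
    CNI_DIR = "/etc/kubernetes/cni/net.d/"

    while True:
        entries = sorted(os.listdir(CNI_DIR)) if os.path.isdir(CNI_DIR) else []
        if entries:
            print("CNI configuration present:", entries)
            break
        print("no CNI configuration file yet; waiting for the network provider")
        time.sleep(5)  # arbitrary poll interval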
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.108731 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.108781 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.108797 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.108824 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.108841 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.212884 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.212971 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.212988 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.213010 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.213028 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.315573 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.315909 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.316494 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.316667 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.316804 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.420970 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.421037 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.421062 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.421102 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.421120 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.524514 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.524589 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.524608 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.524634 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.524652 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.627814 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.628227 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.628357 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.628552 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.628980 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.731797 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.731908 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.731965 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.731992 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.732009 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.834595 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.834693 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.834707 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.834725 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.835026 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.937944 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.937999 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.938011 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.938033 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:12 crc kubenswrapper[4818]: I0930 17:00:12.938051 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:12Z","lastTransitionTime":"2025-09-30T17:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.019767 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:13 crc kubenswrapper[4818]: E0930 17:00:13.020021 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.041019 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.041058 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.041075 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.041099 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.041116 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.144334 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.144373 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.144384 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.144401 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.144412 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.246755 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.246840 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.246861 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.246891 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.246910 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.349320 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.349354 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.349363 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.349377 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.349388 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.452007 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.452110 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.452127 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.452155 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.452174 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.555902 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.555967 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.555980 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.555999 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.556013 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.658845 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.658976 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.659030 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.659055 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.659075 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.762036 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.762098 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.762120 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.762151 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.762173 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.865537 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.865591 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.865605 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.865626 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.865642 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.970039 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.970109 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.970126 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.970150 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:13 crc kubenswrapper[4818]: I0930 17:00:13.970168 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:13Z","lastTransitionTime":"2025-09-30T17:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.020221 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.020275 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.020485 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:14 crc kubenswrapper[4818]: E0930 17:00:14.020570 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:14 crc kubenswrapper[4818]: E0930 17:00:14.020739 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:14 crc kubenswrapper[4818]: E0930 17:00:14.024308 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.051913 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restart
Count\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use 
of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.071972 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.074258 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.074469 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.074483 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.074508 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.074525 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.092847 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.112097 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.133568 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.152733 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.167076 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.177821 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.177867 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.177876 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.177893 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.177940 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.188120 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.205718 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.223156 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.242329 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.262423 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.281607 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.281728 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.281791 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.281822 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.281842 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.285296 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.302917 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.318668 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.348966 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.367523 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:14Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.385035 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.385085 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.385098 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.385120 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.385133 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.487709 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.487753 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.487764 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.487781 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.487793 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.590601 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.590653 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.590667 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.590685 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.590701 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.693475 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.693535 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.693555 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.693581 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.693600 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.796750 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.797274 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.797475 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.797672 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.797896 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.901241 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.901378 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.901400 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.901432 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:14 crc kubenswrapper[4818]: I0930 17:00:14.901452 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:14Z","lastTransitionTime":"2025-09-30T17:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.005131 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.005555 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.005579 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.005607 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.005628 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.019823 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:15 crc kubenswrapper[4818]: E0930 17:00:15.020015 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.109321 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.110207 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.110240 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.110267 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.110284 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.213177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.213260 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.213295 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.213323 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.213341 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.316517 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.316581 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.316598 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.316624 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.316643 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.420009 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.420101 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.420158 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.420182 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.420200 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.523559 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.523644 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.523668 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.523702 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.523726 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.626795 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.626882 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.626903 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.626968 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.626991 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.729838 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.729908 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.729959 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.729979 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.729994 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.833523 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.833602 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.833625 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.833659 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.833680 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.937221 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.937260 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.937271 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.937286 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:15 crc kubenswrapper[4818]: I0930 17:00:15.937299 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:15Z","lastTransitionTime":"2025-09-30T17:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.019440 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.019490 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.019553 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:16 crc kubenswrapper[4818]: E0930 17:00:16.019715 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:16 crc kubenswrapper[4818]: E0930 17:00:16.019812 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:16 crc kubenswrapper[4818]: E0930 17:00:16.019900 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.040192 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.040229 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.040238 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.040252 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.040262 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.143500 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.143551 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.143567 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.143591 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.143609 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.246412 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.246498 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.246527 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.246562 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.246588 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.350032 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.350081 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.350099 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.350121 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.350141 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.453055 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.453120 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.453143 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.453176 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.453199 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.556366 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.556402 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.556414 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.556431 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.556442 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.659648 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.659698 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.659708 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.659723 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.659734 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.766432 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.766477 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.766489 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.766506 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:16 crc kubenswrapper[4818]: I0930 17:00:16.766518 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:16Z","lastTransitionTime":"2025-09-30T17:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 30 17:00:17 crc kubenswrapper[4818]: I0930 17:00:17.019752 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:17 crc kubenswrapper[4818]: E0930 17:00:17.019881 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:17 crc kubenswrapper[4818]: I0930 17:00:17.020774 4818 scope.go:117] "RemoveContainer" containerID="e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0"
Sep 30 17:00:17 crc kubenswrapper[4818]: E0930 17:00:17.021051 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"
Sep 30 17:00:17 crc kubenswrapper[4818]: I0930 17:00:17.075110 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:17 crc kubenswrapper[4818]: I0930 17:00:17.075163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:17 crc kubenswrapper[4818]: I0930 17:00:17.075182 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:17 crc kubenswrapper[4818]: I0930 17:00:17.075205 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:17 crc kubenswrapper[4818]: I0930 17:00:17.075222 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:17Z","lastTransitionTime":"2025-09-30T17:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.002834 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.002912 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.002960 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.002985 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.003004 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:18Z","lastTransitionTime":"2025-09-30T17:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.020337 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.020354 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:18 crc kubenswrapper[4818]: I0930 17:00:18.020540 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:18 crc kubenswrapper[4818]: E0930 17:00:18.021357 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:18 crc kubenswrapper[4818]: E0930 17:00:18.021488 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:18 crc kubenswrapper[4818]: E0930 17:00:18.021767 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:19 crc kubenswrapper[4818]: I0930 17:00:19.020193 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:19 crc kubenswrapper[4818]: E0930 17:00:19.020360 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:19 crc kubenswrapper[4818]: I0930 17:00:19.033231 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:19 crc kubenswrapper[4818]: I0930 17:00:19.033265 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:19 crc kubenswrapper[4818]: I0930 17:00:19.033276 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:19 crc kubenswrapper[4818]: I0930 17:00:19.033297 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:19 crc kubenswrapper[4818]: I0930 17:00:19.033311 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:19Z","lastTransitionTime":"2025-09-30T17:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.020318 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:20 crc kubenswrapper[4818]: E0930 17:00:20.020578 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.020870 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:20 crc kubenswrapper[4818]: E0930 17:00:20.020978 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.021138 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:20 crc kubenswrapper[4818]: E0930 17:00:20.021221 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.060663 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.060693 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.060702 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.060716 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:20 crc kubenswrapper[4818]: I0930 17:00:20.060725 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:20Z","lastTransitionTime":"2025-09-30T17:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.020345 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:21 crc kubenswrapper[4818]: E0930 17:00:21.020552 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.091071 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.091135 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.091155 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.091181 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.091200 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.193713 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.193777 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.193794 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.193819 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.193839 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.296894 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.296944 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.296954 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.296967 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.296977 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.399474 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.399530 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.399543 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.399561 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.399576 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502024 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502061 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502072 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502088 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502099 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502856 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502886 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502896 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502907 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.502936 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:21 crc kubenswrapper[4818]: E0930 17:00:21.515361 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:21Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.519788 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.519829 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.519842 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.519860 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.519875 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: E0930 17:00:21.534158 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:21Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.538343 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.538394 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.538408 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.538427 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.538441 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: E0930 17:00:21.553795 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:21Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.557515 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.557542 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.557556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.557574 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.557590 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: E0930 17:00:21.572437 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:21Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.576328 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.576378 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.576391 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.576410 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.576424 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: E0930 17:00:21.593039 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:21Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:21 crc kubenswrapper[4818]: E0930 17:00:21.593540 4818 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.605233 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.605406 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.605499 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.605577 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.605659 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.708404 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.708435 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.708444 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.708458 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.708467 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.810580 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.810645 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.810665 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.810691 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.810708 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.913851 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.913886 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.913895 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.913910 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:21 crc kubenswrapper[4818]: I0930 17:00:21.913919 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:21Z","lastTransitionTime":"2025-09-30T17:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.017101 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.017184 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.017202 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.017227 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.017247 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.019457 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:22 crc kubenswrapper[4818]: E0930 17:00:22.019655 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.019806 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.019819 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:22 crc kubenswrapper[4818]: E0930 17:00:22.020051 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:22 crc kubenswrapper[4818]: E0930 17:00:22.020388 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.120511 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.120568 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.120587 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.120611 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.120632 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.223039 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.223089 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.223103 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.223126 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.223145 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.326109 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.326146 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.326156 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.326171 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.326183 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.428644 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.428684 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.428700 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.428726 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.428743 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.531566 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.531613 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.531622 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.531637 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.531647 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.634082 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.634127 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.634145 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.634165 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.634181 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.736572 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.736619 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.736628 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.736644 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.736653 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.839995 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.840038 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.840047 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.840062 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.840071 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.942270 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.942351 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.942386 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.942420 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:22 crc kubenswrapper[4818]: I0930 17:00:22.942439 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:22Z","lastTransitionTime":"2025-09-30T17:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.019783 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:23 crc kubenswrapper[4818]: E0930 17:00:23.019957 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.044456 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.044515 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.044531 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.044555 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.044572 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.147011 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.147063 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.147076 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.147096 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.147109 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.250650 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.250687 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.250696 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.250710 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.250719 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.352881 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.352933 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.352946 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.352991 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.353004 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.455891 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.456048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.456066 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.456090 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.456107 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.559618 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.559850 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.559992 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.560086 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.560112 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.661714 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.661751 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.661763 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.661777 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.661789 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.765013 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.765055 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.765065 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.765082 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.765091 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.869061 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.869139 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.869158 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.869188 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.869215 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.978375 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.978457 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.978476 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.978933 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:23 crc kubenswrapper[4818]: I0930 17:00:23.979011 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:23Z","lastTransitionTime":"2025-09-30T17:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.019492 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.019506 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:24 crc kubenswrapper[4818]: E0930 17:00:24.019736 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.019511 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:24 crc kubenswrapper[4818]: E0930 17:00:24.019981 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:24 crc kubenswrapper[4818]: E0930 17:00:24.020022 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.034315 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-di
r\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.047475 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.059320 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.072705 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.082333 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.082374 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.082388 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.082408 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.082420 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.084544 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.097398 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.113049 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.125034 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.135143 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.156219 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154
edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.166266 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.179551 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.185197 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.185272 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.185282 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.185315 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.185324 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.192152 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.205406 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.216350 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.229668 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.241848 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:24Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.287418 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.287459 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.287468 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.287484 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.287494 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.389981 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.390024 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.390035 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.390051 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.390066 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.491487 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.491525 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.491539 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.491556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.491566 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.594231 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.594289 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.594309 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.594333 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.594350 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.697325 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.697379 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.697396 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.697414 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.697438 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.800484 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.800514 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.800524 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.800537 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.800547 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.902952 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.902995 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.903004 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.903035 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:24 crc kubenswrapper[4818]: I0930 17:00:24.903049 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:24Z","lastTransitionTime":"2025-09-30T17:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.006097 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.006378 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.006456 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.006516 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.006576 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.019589 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:25 crc kubenswrapper[4818]: E0930 17:00:25.019720 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.109307 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.109353 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.109362 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.109378 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.109387 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.211388 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.211418 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.211428 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.211443 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.211452 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.313723 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.313767 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.313783 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.313805 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.313822 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.416483 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.417139 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.417359 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.417563 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.417750 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.519804 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.519841 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.519852 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.519869 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.519882 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.622044 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.622092 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.622107 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.622127 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.622140 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.723829 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.723861 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.723873 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.723889 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.723899 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.825596 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.825651 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.825662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.825677 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.825689 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.928483 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.928539 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.928556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.928579 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:25 crc kubenswrapper[4818]: I0930 17:00:25.928596 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:25Z","lastTransitionTime":"2025-09-30T17:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.019989 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.019989 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:26 crc kubenswrapper[4818]: E0930 17:00:26.020184 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.020015 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:26 crc kubenswrapper[4818]: E0930 17:00:26.020348 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:26 crc kubenswrapper[4818]: E0930 17:00:26.020401 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.030486 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.030541 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.030555 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.030574 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.030588 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.134092 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.134136 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.134147 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.134165 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.134177 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.236992 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.237031 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.237041 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.237059 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.237070 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.340285 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.340337 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.340350 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.340369 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.340382 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.412539 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:26 crc kubenswrapper[4818]: E0930 17:00:26.412703 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 17:00:26 crc kubenswrapper[4818]: E0930 17:00:26.412753 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 17:00:58.412739335 +0000 UTC m=+105.167011151 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.443176 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.443235 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.443253 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.443277 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.443295 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.545968 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.546010 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.546019 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.546034 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.546043 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
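
The MountVolume failure above is a startup-ordering issue rather than a missing object: openshift-multus/metrics-daemon-secret is "not registered" because the kubelet has not yet synced that secret, so the volume manager schedules a retry with exponential backoff (32s here, next attempt not before 17:00:58). The doubling-with-a-cap pattern can be sketched as follows; the 2s base and 122s cap are illustrative assumptions, since the log only shows the single 32s sample:

    # Illustrative exponential backoff for volume mount retries.
    def next_delay(prev: float, base: float = 2.0, cap: float = 122.0) -> float:
        """Double the previous delay, starting at `base`, never exceeding `cap`."""
        return min(base if prev <= 0 else prev * 2.0, cap)

    delay = 0.0
    for attempt in range(1, 7):
        delay = next_delay(delay)
        print(f"attempt {attempt}: retry in {delay:.0f}s")  # 2, 4, 8, 16, 32, 64
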
Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.648073 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.648116 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.648128 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.648146 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.648158 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.750459 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.750495 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.750508 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.750524 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.750535 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.852249 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.852284 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.852293 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.852307 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.852319 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.955113 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.955168 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.955181 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.955202 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:26 crc kubenswrapper[4818]: I0930 17:00:26.955220 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:26Z","lastTransitionTime":"2025-09-30T17:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.020019 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:27 crc kubenswrapper[4818]: E0930 17:00:27.020157 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.059390 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.059561 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.059651 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.059774 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.059905 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.162692 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.163069 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.163167 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.163416 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.163538 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.267985 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.268531 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.268672 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.268807 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.268942 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.372248 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.372282 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.372292 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.372306 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.372317 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.473447 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/0.log"
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.473526 4818 generic.go:334] "Generic (PLEG): container finished" podID="d36fce8a-ff27-48bf-be9c-67fc2046136d" containerID="dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6" exitCode=1
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.473568 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerDied","Data":"dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6"}
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.474165 4818 scope.go:117] "RemoveContainer" containerID="dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6"
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.475020 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.475042 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.475050 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.475060 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.475068 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
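
The multus sequence above shows the PLEG pipeline end to end: the kubelet parses the container's on-disk log, observes exit code 1, emits a ContainerDied event, and marks the old container for cleanup; with the default restart policy the runtime will then start a replacement. The log path encodes namespace, pod name, pod UID, container name, and restart count; a hypothetical decomposition, with the layout inferred from the path in the log line:

    from pathlib import Path

    # Path copied verbatim from the "Finished parsing log file" entry above.
    p = Path("/var/log/pods/openshift-multus_multus-hq6j2_"
             "d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/0.log")
    ns, pod, uid = p.parts[4].split("_", 2)    # <namespace>_<pod>_<uid>
    container, restart = p.parts[5], p.stem    # container name, restart count
    print(ns, pod, uid, container, "restart", restart)
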
Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.502164 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.521329 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.539628 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.554814 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.577027 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.577741 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.577807 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.577838 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.577865 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.577883 4818 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.590759 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.607092 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:26Z\\\",\\\"message\\\":\\\"2025-09-30T16:59:41+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b\\\\n2025-09-30T16:59:41+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b to /host/opt/cni/bin/\\\\n2025-09-30T16:59:41Z [verbose] multus-daemon started\\\\n2025-09-30T16:59:41Z [verbose] Readiness Indicator file check\\\\n2025-09-30T17:00:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.621133 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.634744 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.647891 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.662449 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.672227 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.681183 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.681413 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.681630 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.681828 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.682367 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.682281 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.696179 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.706609 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.716632 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.743555 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:27Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.786337 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.786791 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.786941 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.787096 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.787234 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.889632 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.889681 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.889689 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.889703 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.889712 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.991974 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.992146 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.992211 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.992278 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:27 crc kubenswrapper[4818]: I0930 17:00:27.992340 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:27Z","lastTransitionTime":"2025-09-30T17:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.022699 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:28 crc kubenswrapper[4818]: E0930 17:00:28.023088 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.023392 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:28 crc kubenswrapper[4818]: E0930 17:00:28.023550 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.023822 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:28 crc kubenswrapper[4818]: E0930 17:00:28.024026 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.095284 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.095653 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.095881 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.096148 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.096396 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.199810 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.200170 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.200360 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.200531 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.200683 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.314578 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.314817 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.314967 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.315094 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.315181 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.418090 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.418432 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.418518 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.418634 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.418761 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.480239 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/0.log" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.480309 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerStarted","Data":"9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.502364 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.522213 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.522265 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.522284 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.522312 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.522331 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.522435 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.540037 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.558690 4818 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:26Z\\\",\\\"message\\\":\\\"2025-09-30T16:59:41+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b\\\\n2025-09-30T16:59:41+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b to /host/opt/cni/bin/\\\\n2025-09-30T16:59:41Z [verbose] multus-daemon started\\\\n2025-09-30T16:59:41Z [verbose] Readiness Indicator file check\\\\n2025-09-30T17:00:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T17:00:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.575772 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.590882 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.609190 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.622807 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.625074 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.625135 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.625153 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.625177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.625195 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.633483 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.652631 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.666892 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.683803 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.698991 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.710679 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.729061 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.729123 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.729147 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.729175 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.729199 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.730561 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' 
detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.748672 4818 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb
294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.766832 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:28Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.831577 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.831625 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.831640 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.831662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.831680 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.935405 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.935479 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.935500 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.935524 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:28 crc kubenswrapper[4818]: I0930 17:00:28.935542 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:28Z","lastTransitionTime":"2025-09-30T17:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.019384 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:29 crc kubenswrapper[4818]: E0930 17:00:29.019530 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.020527 4818 scope.go:117] "RemoveContainer" containerID="e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.037585 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.037637 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.037650 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.037669 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.037730 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.140860 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.141308 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.141331 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.141361 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.141382 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.244502 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.244551 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.244561 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.244578 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.244590 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.347586 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.347668 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.347694 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.347740 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.347763 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.450215 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.450257 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.450268 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.450286 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.450298 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.485933 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/2.log" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.488182 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.489063 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.507692 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.526287 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.541917 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.552343 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.552400 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.552412 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.552430 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.552798 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.553716 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.565072 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.581449 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T17:00:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.593322 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.602648 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.616802 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.628737 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.640846 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.650782 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.654323 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.654360 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.654393 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.654409 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.654419 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.663046 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.672772 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.683129 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.694705 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\
"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.705364 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:26Z\\\",\\\"message\\\":\\\"2025-09-30T16:59:41+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b\\\\n2025-09-30T16:59:41+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b to /host/opt/cni/bin/\\\\n2025-09-30T16:59:41Z [verbose] multus-daemon started\\\\n2025-09-30T16:59:41Z [verbose] Readiness Indicator file check\\\\n2025-09-30T17:00:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T17:00:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:29Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.757092 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.757133 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.757143 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.757157 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.757166 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.859731 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.859784 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.859802 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.859827 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.859845 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.963323 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.963394 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.963412 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.963439 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:29 crc kubenswrapper[4818]: I0930 17:00:29.963462 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:29Z","lastTransitionTime":"2025-09-30T17:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.020288 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.020363 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:30 crc kubenswrapper[4818]: E0930 17:00:30.020462 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:30 crc kubenswrapper[4818]: E0930 17:00:30.020604 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.020312 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:30 crc kubenswrapper[4818]: E0930 17:00:30.021151 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.067040 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.067354 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.067578 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.067819 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.068420 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.171144 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.171184 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.171194 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.171210 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.171222 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.274842 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.274910 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.274977 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.275004 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.275020 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.377008 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.377063 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.377083 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.377110 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.377129 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.480562 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.480613 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.480629 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.480650 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.480667 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.493605 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/3.log"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.494557 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/2.log"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.502085 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" exitCode=1
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.502163 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"}
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.502228 4818 scope.go:117] "RemoveContainer" containerID="e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.503411 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"
Sep 30 17:00:30 crc kubenswrapper[4818]: E0930 17:00:30.503748 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"
Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.521240 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.545237 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.566133 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.583083 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.583144 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.583160 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.583185 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.583203 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.592120 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.610215 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.630660 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.652812 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59
:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.674494 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.685902 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.685957 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.685967 4818 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.685980 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.685989 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.692983 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.713897 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:26Z\\\",\\\"message\\\":\\\"2025-09-30T16:59:41+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b\\\\n2025-09-30T16:59:41+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b to /host/opt/cni/bin/\\\\n2025-09-30T16:59:41Z [verbose] multus-daemon started\\\\n2025-09-30T16:59:41Z [verbose] Readiness Indicator file check\\\\n2025-09-30T17:00:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T17:00:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.733174 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.752398 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.775439 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.789530 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.789587 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.789603 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.789627 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.789646 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.793050 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.808161 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.835899 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e189075879e77799102f18020f436eb4e11124fb49b1783b30cef4367259b1f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:02Z\\\",\\\"message\\\":\\\"-multus\\\\\\\", Name:\\\\\\\"network-metrics-daemon-4p4hg\\\\\\\", UID:\\\\\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\\\\\", APIVersion:\\\\\\\"v1\\\\\\\", ResourceVersion:\\\\\\\"26911\\\\\\\", FieldPath:\\\\\\\"\\\\\\\"}): type: 'Warning' reason: 'ErrorAddingResource' addLogicalPort failed for openshift-multus/network-metrics-daemon-4p4hg: failed to update pod openshift-multus/network-metrics-daemon-4p4hg: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:02Z is after 2025-08-24T17:21:41Z\\\\nI0930 17:00:02.025323 6488 handler.go:208] Removed *v1.Node event handler 2\\\\nI0930 17:00:02.025999 6488 ovnkube.go:599] Stopped ovnkube\\\\nI0930 17:00:02.026038 6488 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0930 17:00:02.026062 6488 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI0930 17:00:02.026103 6488 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 2.84689ms\\\\nF0930 17:00:02.026110 6488 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:29Z\\\",\\\"message\\\":\\\"oauth-apiserver] map[operator.openshift.io/spec-hash:9c74227d7f96d723d980c50373a5e91f08c5893365bfd5a5040449b1b6585a23 prometheus.io/scheme:https prometheus.io/scrape:true service.alpha.openshift.io/serving-cert-secret-name:serving-cert service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 8443 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{apiserver: true,},ClusterIP:10.217.4.140,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.140],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0930 17:00:29.844797 6846 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\"
:\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.854083 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:30Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.891668 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.891720 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.891737 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.891762 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.891777 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.995404 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.995446 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.995457 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.995475 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:30 crc kubenswrapper[4818]: I0930 17:00:30.995487 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:30Z","lastTransitionTime":"2025-09-30T17:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.020171 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.020445 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.098281 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.098343 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.098366 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.098399 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.098427 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.201856 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.201960 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.202012 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.202040 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.202061 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.306179 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.306253 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.306272 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.306302 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.306329 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.410176 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.410264 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.410281 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.410310 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.410331 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.509337 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/3.log" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.512035 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.512095 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.512116 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.512145 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.512167 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.515240 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.515559 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.533817 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 
17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.554624 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.573867 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.595371 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.611961 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.616778 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.616878 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.616909 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.616975 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.617002 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.628812 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.657158 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:29Z\\\",\\\"message\\\":\\\"oauth-apiserver] map[operator.openshift.io/spec-hash:9c74227d7f96d723d980c50373a5e91f08c5893365bfd5a5040449b1b6585a23 prometheus.io/scheme:https prometheus.io/scrape:true service.alpha.openshift.io/serving-cert-secret-name:serving-cert service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 8443 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{apiserver: true,},ClusterIP:10.217.4.140,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.140],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0930 17:00:29.844797 6846 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.679102 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.696822 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.716448 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.720163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.720361 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.720499 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.720635 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.720763 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.734829 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.755505 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.770917 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.789905 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:26Z\\\",\\\"message\\\":\\\"2025-09-30T16:59:41+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b\\\\n2025-09-30T16:59:41+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b to /host/opt/cni/bin/\\\\n2025-09-30T16:59:41Z [verbose] multus-daemon started\\\\n2025-09-30T16:59:41Z [verbose] Readiness 
Indicator file check\\\\n2025-09-30T17:00:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T17:00:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.797556 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.797822 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.798050 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.798248 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.798452 4818 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.806424 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-schedu
ler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.813527 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed2
1\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.819345 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: 
I0930 17:00:31.819704 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.819860 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.820048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.820214 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.824395 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":
\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.838047 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.842731 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.843483 4818 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.843671 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.843861 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.844112 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.844175 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.863379 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.869129 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.869195 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.869213 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.869238 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.869258 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.887905 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.894028 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.894080 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.894101 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.894128 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.894145 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.912801 4818 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5773b9f7-7ba9-4297-8817-bd7e24295211\\\",\\\"systemUUID\\\":\\\"6f343d33-25c9-4fa3-9228-821c4ed396ef\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:31Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:31 crc kubenswrapper[4818]: E0930 17:00:31.913395 4818 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.916416 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.916487 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.916512 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.916542 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:31 crc kubenswrapper[4818]: I0930 17:00:31.916567 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:31Z","lastTransitionTime":"2025-09-30T17:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.019842 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:32 crc kubenswrapper[4818]: E0930 17:00:32.020141 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.020193 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.020266 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:32 crc kubenswrapper[4818]: E0930 17:00:32.020371 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:32 crc kubenswrapper[4818]: E0930 17:00:32.020540 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.020631 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.020662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.020720 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.020742 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.020761 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.123507 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.123583 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.123606 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.123636 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.123655 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.226469 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.226525 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.226542 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.226564 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.226581 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.330007 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.330054 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.330071 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.330093 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.330110 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.432724 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.432787 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.432804 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.432829 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.432849 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.535952 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.536842 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.537137 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.537355 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.537501 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.640878 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.640996 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.641022 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.641051 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.641073 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.744007 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.744331 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.744540 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.744689 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.744963 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.847890 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.847972 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.847990 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.848014 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.848031 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.950864 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.950950 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.950975 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.951005 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:32 crc kubenswrapper[4818]: I0930 17:00:32.951026 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:32Z","lastTransitionTime":"2025-09-30T17:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.020330 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:33 crc kubenswrapper[4818]: E0930 17:00:33.020457 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.054324 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.054381 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.054401 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.054425 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.054444 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.158665 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.158725 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.158742 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.158769 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.158787 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.264740 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.264799 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.264817 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.264842 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.264859 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.367953 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.367987 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.368030 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.368048 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.368059 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.471095 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.471166 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.471177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.471229 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.471253 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.574456 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.574514 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.574533 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.574566 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.574584 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.677527 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.677597 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.677616 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.677642 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.677661 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.781409 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.781498 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.781559 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.781587 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.781606 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.885163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.885581 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.885833 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.886095 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.886310 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.989731 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.989823 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.989854 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.989887 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:33 crc kubenswrapper[4818]: I0930 17:00:33.989910 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:33Z","lastTransitionTime":"2025-09-30T17:00:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.019433 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.019532 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.019467 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:34 crc kubenswrapper[4818]: E0930 17:00:34.019642 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:34 crc kubenswrapper[4818]: E0930 17:00:34.019803 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:34 crc kubenswrapper[4818]: E0930 17:00:34.019907 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.041805 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0f3ebad-fcdf-44a6-8c6e-6d22c065686d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec96e12decace799896cf4030dcf1991b883886f0253431d9147b9116f45d138\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://592d9003e5053cfe8e19b78a635b0d51105ade6a20239c70a991e8ce3b5b03cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"
volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://493d029221297680d7b283e5ffe3d751cd0d58163d87a15519924b99632ef162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://278290c02d96a50764ee0b6ff074c7b353536888a88a1c9a9c98508e06eba071\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.068137 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92a718de955bafc3f357d2869e377ddaad22944f3e79d19d7fd14a73328002ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad977043dbc88d356779b974e557af9c245e0f90735c2e00a086978987d63bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.087747 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e908152-dcb2-4b41-974d-26b03ae0254b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d65b63a83b095ad00cb9dd783bfd232da0e36fecb60cf825e05a0e83188f8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qdxhd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vc6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.093226 4818 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.093264 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.093278 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.093294 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.093305 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.108966 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hq6j2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d36fce8a-ff27-48bf-be9c-67fc2046136d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T17:00:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:26Z\\\",\\\"message\\\":\\\"2025-09-30T16:59:41+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b\\\\n2025-09-30T16:59:41+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_546e080d-1dc7-41ef-b705-30ec6ab5a22b to /host/opt/cni/bin/\\\\n2025-09-30T16:59:41Z [verbose] multus-daemon started\\\\n2025-09-30T16:59:41Z [verbose] Readiness Indicator file check\\\\n2025-09-30T17:00:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T17:00:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8ckz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hq6j2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.122849 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.138093 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.164640 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd02846-2628-4200-a7fe-886042bd15bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31568e81f645f432a0248322f0cebcae6cdff36ff1f6ac7dec0d41326a73b1d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae5483b4e0b364b25a959bacb151c74df176c8138002dd73fc5b856676834444\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328892a131acb74b94e8c376dada13177359ec9b41ca7592ee44fc4dd8443f41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://271f0491950fe94a05e22183f05d6d1a743041be3e4cae70b32e692782526b40\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83de8df9769578b5f7de6320e6696a14ab88d5658c3d62be14b4c7eabdda020f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aecdb82d8d333da0fc403cfff87b65e37d56599698a3b4340d9a882a4c0d8544\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c776d024bb7d5b35eecfff8acd8bb16de08d10e841ed00b88a86e9cdf1d57aee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gsxbk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-wzw6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.180482 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fce7a982-d6f8-46fc-94e2-c029a2b439c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd8d3f0c4fcccedb06fbc60c7a25bcf4e89f76327f03e068f83727d88a49a1e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e9cc726587a32bf3541a30a6b1ad2d189fdb4585fd5a17f84b6b6b2ac77eb6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhbgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-j579t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.195087 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-gd5fd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17ee0898-ae49-455e-b283-185058ad07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3611fe1ab0bdc00ed78bf9716d50fb3141cef946d54c72e4ab35ee767ad4f013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxpnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-gd5fd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.196469 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.196531 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.196548 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.196571 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.196589 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.223573 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf839c771aec3b2e17cede3154a515aea893a50
05753fbf5a6ac1bc1c0ae0f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-30T17:00:29Z\\\",\\\"message\\\":\\\"oauth-apiserver] map[operator.openshift.io/spec-hash:9c74227d7f96d723d980c50373a5e91f08c5893365bfd5a5040449b1b6585a23 prometheus.io/scheme:https prometheus.io/scrape:true service.alpha.openshift.io/serving-cert-secret-name:serving-cert service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 8443 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{apiserver: true,},ClusterIP:10.217.4.140,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.140],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0930 17:00:29.844797 6846 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T17:00:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vxqgg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-ljmfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.236653 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bvqst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4p4hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.257383 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700e0149-1d6e-44d7-971b-063e1dbb1eb5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55a72ca09e24ebc8d8ef663cb10fe543a3961c59266f656a966cad019ed04a5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3c45feb3526da6388c960dc545d3f76035be47389d561e50d589198ec57784\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e05314b493b8701ca89965e53cf353d4268500f18122768c19963255faa792c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e91a002c3b60329db53b3958da91c7ed641ab8efca320d29c0bfb95248389e3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52749184b3bdacaaaf866a5ead8426c7fef830151be1596c727dd49254387245\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-30T16:59:34Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0930 16:59:27.754805 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0930 16:59:27.757011 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2138846315/tls.crt::/tmp/serving-cert-2138846315/tls.key\\\\\\\"\\\\nI0930 16:59:33.974435 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0930 16:59:33.980536 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0930 16:59:33.980593 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0930 16:59:33.980648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0930 16:59:33.980664 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0930 16:59:33.993819 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0930 16:59:33.993861 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993874 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0930 16:59:33.993884 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0930 16:59:33.993892 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0930 16:59:33.993899 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0930 16:59:33.993906 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0930 16:59:33.994303 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0930 16:59:34.001162 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd0163ba4df373ae45a943a033db9365d65c061a532f3ab290642b5aaa4160a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b89865f68f97bc951fbb359a201470ddeae089076c5e9bdf739f43b6acfe8c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-30T16:59:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.279864 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d3b09b7-1a98-4442-90e8-40c872e83cf8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bf887087d638df006e291ccf123eb071a3b2b140c7b765d8eac6228fed0a4bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7f3e154af90087e6b0140eb8edea6ff9cb9116d4e2948a883883b3ad6725649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://293764b79dabc497705485a0f7f35d1423de9a178321445b54b6c06629fc180c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c40f191787f2675dfa686dccf0e32e9a8ee7f7b8b736df7dfb294fa11c4abc47\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z" Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.297459 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81e20558a944e83645b42b2595a035c7fad80d0b7a081efde9283ab442fee10c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.299062 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.299117 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.299134 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.299158 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.299175 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.311550 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ba31da6417ce3229c56699329a5b3de952bf6239fe48ad1cc3dec16fd74fac1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.324092 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.333386 4818 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vmncz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85fe3d18-20dd-467f-be69-fcaa139126f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-30T16:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://549f5757951d1e2bb7f8f6166c63f16690ac0d4d8068e83b721af20f12b40c7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-30T16:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s9zrn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-30T16:59:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vmncz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-30T17:00:34Z is after 2025-08-24T17:21:41Z"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.401746 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.401803 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.401820 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.401843 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.401860 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.505201 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.505265 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.505284 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.505307 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.505325 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.607898 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.607988 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.608000 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.608017 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.608055 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.710793 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.710825 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.710856 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.710874 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.710886 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.813842 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.813908 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.813978 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.814011 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.814033 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.917005 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.917087 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.917111 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.917143 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:34 crc kubenswrapper[4818]: I0930 17:00:34.917165 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:34Z","lastTransitionTime":"2025-09-30T17:00:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.019357 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:35 crc kubenswrapper[4818]: E0930 17:00:35.019881 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.020141 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.020241 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.020302 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.020330 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.020349 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.123074 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.123127 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.123142 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.123165 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.123180 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
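[annotation] The NodeNotReady heartbeats above repeat roughly every 100 ms for the same reason: the network provider has not yet written a CNI configuration into /etc/kubernetes/cni/net.d/, so the container runtime reports NetworkReady=false and pod sandboxes cannot be created. A simplified Go sketch of the readiness probe the message implies (the real CRI-O/ocicni logic does more, e.g. validating the config contents; this only mirrors the directory check):

// cnicheck.go - simplified sketch of the "no CNI configuration file" condition.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("NetworkReady=false:", err)
		return
	}
	for _, e := range entries {
		// Extensions conventionally accepted for CNI configs.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("NetworkReady=true, found", e.Name())
			return
		}
	}
	fmt.Println("NetworkReady=false: no CNI configuration file in", confDir)
}

On this node the condition clears only once the network operator's pods come up and drop a config file into that directory; until then the kubelet keeps re-recording the same node conditions.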
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.225843 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.225885 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.225898 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.225923 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.225956 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.328614 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.328679 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.328701 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.328730 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.328752 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.432667 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.432733 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.432753 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.432780 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.432797 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.535593 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.535647 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.535660 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.535682 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.535697 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.639710 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.639781 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.639799 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.639824 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.639841 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.742524 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.742583 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.742602 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.742625 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.742642 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.845467 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.845528 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.845544 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.845566 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.845582 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.948810 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.949231 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.949355 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.949515 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:35 crc kubenswrapper[4818]: I0930 17:00:35.949655 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:35Z","lastTransitionTime":"2025-09-30T17:00:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.019865 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.019881 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:36 crc kubenswrapper[4818]: E0930 17:00:36.020106 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.020191 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:36 crc kubenswrapper[4818]: E0930 17:00:36.020270 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:36 crc kubenswrapper[4818]: E0930 17:00:36.020427 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.052203 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.052259 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.052275 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.052296 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.052310 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.157406 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.157472 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.157492 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.157515 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.157532 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.265481 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.265906 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.266118 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.266292 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.266424 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.369062 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.369123 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.369134 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.369152 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.369164 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.472051 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.472092 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.472103 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.472118 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.472131 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.574422 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.574805 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.574997 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.575177 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.575305 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.679240 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.679323 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.679347 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.679380 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.679403 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.782526 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.782583 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.782600 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.782623 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.782640 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.884976 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.885015 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.885024 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.885041 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.885051 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.988228 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.988319 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.988339 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.988365 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:36 crc kubenswrapper[4818]: I0930 17:00:36.988384 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:36Z","lastTransitionTime":"2025-09-30T17:00:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.019994 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:37 crc kubenswrapper[4818]: E0930 17:00:37.020212 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.091539 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.091606 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.091623 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.091649 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.091667 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.194353 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.194391 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.194400 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.194412 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.194422 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.297295 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.297349 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.297367 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.297395 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.297417 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.399753 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.399816 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.399832 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.399856 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.399874 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.502223 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.502306 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.502329 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.502359 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.502381 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.606602 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.606703 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.606726 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.606755 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.606772 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.708938 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.708981 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.708995 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.709015 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.709028 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.812289 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.813025 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.813133 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.813248 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.813351 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.916432 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.916491 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.916506 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.916530 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.916543 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:37Z","lastTransitionTime":"2025-09-30T17:00:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.950206 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.950360 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:37 crc kubenswrapper[4818]: E0930 17:00:37.950422 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:41.950392488 +0000 UTC m=+148.704664334 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
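[annotation] The TearDown failure above means the kubevirt.io.hostpath-provisioner CSI driver has not (re)registered with this kubelet since the restart, so the unmount is parked with exponential backoff (durationBeforeRetry 1m4s, i.e. the retry is pushed out to m=+148.70s). Drivers announce themselves through registration sockets that the kubelet watches; assuming the conventional registration directory /var/lib/kubelet/plugins_registry is in use on this node, a rough Go sketch of checking what is registered:

// csidrivers.go - sketch: list kubelet plugin-registration sockets and look
// for the driver named in the log. Directory layout is an assumption based
// on the standard kubelet plugin registration mechanism, not on this log.
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	const regDir = "/var/lib/kubelet/plugins_registry"
	entries, err := os.ReadDir(regDir)
	if err != nil {
		fmt.Println("cannot read registration dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), ".sock") {
			fmt.Println("registered:", e.Name())
			if strings.Contains(e.Name(), "kubevirt.io.hostpath-provisioner") {
				found = true
			}
		}
	}
	if !found {
		fmt.Println("kubevirt.io.hostpath-provisioner not registered; TearDown will keep failing")
	}
}

Once the provisioner's node plugin pod starts and re-creates its socket, the parked unmount should succeed on a later retry.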
Sep 30 17:00:37 crc kubenswrapper[4818]: I0930 17:00:37.950468 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:37 crc kubenswrapper[4818]: E0930 17:00:37.950512 4818 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Sep 30 17:00:37 crc kubenswrapper[4818]: E0930 17:00:37.950579 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 17:01:41.950561844 +0000 UTC m=+148.704833700 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Sep 30 17:00:37 crc kubenswrapper[4818]: E0930 17:00:37.950609 4818 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Sep 30 17:00:37 crc kubenswrapper[4818]: E0930 17:00:37.950661 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-30 17:01:41.950645996 +0000 UTC m=+148.704917852 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.018858 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.019235 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.019383 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.019421 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.019634 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.019567 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.020070 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.020122 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.019487 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.020303 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.020462 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.051669 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.051746 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.051876 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.051897 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.051916 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.051972 4818 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.051976 4818 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.052001 4818 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.052062 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-30 17:01:42.052030946 +0000 UTC m=+148.806302802 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.052098 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 17:01:42.052081457 +0000 UTC m=+148.806353313 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 17:00:38 crc kubenswrapper[4818]: E0930 17:00:38.052098 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-30 17:01:42.052081457 +0000 UTC m=+148.806353313 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.123120 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.123185 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.123209 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.123237 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.123260 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.226429 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.226496 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.226518 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.226544 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.226567 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.330204 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.330301 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.330324 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.330387 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.330411 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.433440 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.433503 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.433527 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.433558 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.433577 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.536719 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.536793 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.536816 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.536841 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.536866 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.640138 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.640215 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.640231 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.640261 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.640279 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.743246 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.743298 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.743314 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.743336 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.743356 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.846340 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.846391 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.846403 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.846422 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.846436 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.949816 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.949857 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.949865 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.949878 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:38 crc kubenswrapper[4818]: I0930 17:00:38.949889 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:38Z","lastTransitionTime":"2025-09-30T17:00:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.019957 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:39 crc kubenswrapper[4818]: E0930 17:00:39.020137 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.054563 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.054662 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.054684 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.054710 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.054729 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.157746 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.157858 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.157884 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.157912 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.157988 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.260407 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.260448 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.260460 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.260478 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.260489 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.364185 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.364246 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.364267 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.364295 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.364316 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.467195 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.467253 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.467273 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.467296 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.467313 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.569693 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.569735 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.569748 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.569768 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.569780 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.673099 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.673151 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.673163 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.673186 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.673200 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.776553 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.777148 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.777178 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.777202 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.777221 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.880675 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.880742 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.880759 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.880812 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.880834 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.985254 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.985323 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.985342 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.985368 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:39 crc kubenswrapper[4818]: I0930 17:00:39.985440 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:39Z","lastTransitionTime":"2025-09-30T17:00:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.019757 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.019807 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.019841 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:40 crc kubenswrapper[4818]: E0930 17:00:40.019977 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:40 crc kubenswrapper[4818]: E0930 17:00:40.020077 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:40 crc kubenswrapper[4818]: E0930 17:00:40.020172 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.088998 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.089073 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.089091 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.089118 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.089140 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.192186 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.192254 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.192271 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.192295 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.192316 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.295916 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.295991 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.296003 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.296021 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.296036 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.398718 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.398787 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.398811 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.398844 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.398873 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.502450 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.502519 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.502543 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.502575 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.502595 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.606028 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.606092 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.606108 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.606132 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.606148 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.709550 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.709614 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.709632 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.709656 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.709674 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.812554 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.812629 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.812653 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.812677 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.812707 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.916633 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.916699 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.916717 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.916741 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:40 crc kubenswrapper[4818]: I0930 17:00:40.916760 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:40Z","lastTransitionTime":"2025-09-30T17:00:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.019472 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:00:41 crc kubenswrapper[4818]: E0930 17:00:41.019658 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.019890 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.019985 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.020004 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.020025 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.020044 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.122802 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.122861 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.122878 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.122901 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.122951 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.226312 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.226364 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.226379 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.226402 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.226420 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.328784 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.328853 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.328871 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.328896 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.328916 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.432164 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.432250 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.432274 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.432303 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.432326 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.535728 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.535801 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.535821 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.535846 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.535874 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.645054 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.645909 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.646191 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.646394 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.646659 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.750127 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.750181 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.750197 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.750222 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.750240 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.854029 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.854080 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.854096 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.854120 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.854138 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.957858 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.957955 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.957974 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.957998 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:41 crc kubenswrapper[4818]: I0930 17:00:41.958015 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:41Z","lastTransitionTime":"2025-09-30T17:00:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.020228 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.020338 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.020413 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:00:42 crc kubenswrapper[4818]: E0930 17:00:42.020624 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:00:42 crc kubenswrapper[4818]: E0930 17:00:42.020771 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:00:42 crc kubenswrapper[4818]: E0930 17:00:42.020877 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.060766 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.060818 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.060835 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.060860 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.060878 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:42Z","lastTransitionTime":"2025-09-30T17:00:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.139096 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.139176 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.139193 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.139220 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.139239 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:42Z","lastTransitionTime":"2025-09-30T17:00:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.173189 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.173233 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.173245 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.173266 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.173278 4818 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-30T17:00:42Z","lastTransitionTime":"2025-09-30T17:00:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.211441 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8"] Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.212308 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.214322 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.215755 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.215946 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.216093 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.298061 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89bfa286-382d-4fd8-995d-70fbd141f8ec-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.298168 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/89bfa286-382d-4fd8-995d-70fbd141f8ec-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.298233 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/89bfa286-382d-4fd8-995d-70fbd141f8ec-service-ca\") pod 
\"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.298287 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/89bfa286-382d-4fd8-995d-70fbd141f8ec-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.298321 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89bfa286-382d-4fd8-995d-70fbd141f8ec-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.298629 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-vmncz" podStartSLOduration=64.298610677 podStartE2EDuration="1m4.298610677s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.297978197 +0000 UTC m=+89.052250073" watchObservedRunningTime="2025-09-30 17:00:42.298610677 +0000 UTC m=+89.052882533" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.326509 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=68.326462261 podStartE2EDuration="1m8.326462261s" podCreationTimestamp="2025-09-30 16:59:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.326383528 +0000 UTC m=+89.080655374" watchObservedRunningTime="2025-09-30 17:00:42.326462261 +0000 UTC m=+89.080734127" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.351541 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=68.351517244 podStartE2EDuration="1m8.351517244s" podCreationTimestamp="2025-09-30 16:59:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.350997277 +0000 UTC m=+89.105269123" watchObservedRunningTime="2025-09-30 17:00:42.351517244 +0000 UTC m=+89.105789070" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.394943 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-hq6j2" podStartSLOduration=63.394888261 podStartE2EDuration="1m3.394888261s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.394342113 +0000 UTC m=+89.148613939" watchObservedRunningTime="2025-09-30 17:00:42.394888261 +0000 UTC m=+89.149160087" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.395315 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podStartSLOduration=64.395307875 podStartE2EDuration="1m4.395307875s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.370968315 +0000 UTC m=+89.125240171" watchObservedRunningTime="2025-09-30 17:00:42.395307875 +0000 UTC m=+89.149579701" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.398817 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/89bfa286-382d-4fd8-995d-70fbd141f8ec-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.398880 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/89bfa286-382d-4fd8-995d-70fbd141f8ec-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.398917 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/89bfa286-382d-4fd8-995d-70fbd141f8ec-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.398964 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89bfa286-382d-4fd8-995d-70fbd141f8ec-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.398960 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/89bfa286-382d-4fd8-995d-70fbd141f8ec-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.398995 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89bfa286-382d-4fd8-995d-70fbd141f8ec-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.399114 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/89bfa286-382d-4fd8-995d-70fbd141f8ec-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.400190 4818 
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.400190 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/89bfa286-382d-4fd8-995d-70fbd141f8ec-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8"
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.409769 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89bfa286-382d-4fd8-995d-70fbd141f8ec-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8"
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.428321 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89bfa286-382d-4fd8-995d-70fbd141f8ec-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kfpk8\" (UID: \"89bfa286-382d-4fd8-995d-70fbd141f8ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8"
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.434610 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=39.434574919 podStartE2EDuration="39.434574919s" podCreationTimestamp="2025-09-30 17:00:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.434020431 +0000 UTC m=+89.188292257" watchObservedRunningTime="2025-09-30 17:00:42.434574919 +0000 UTC m=+89.188846785"
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.486299 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-wzw6f" podStartSLOduration=63.486266236 podStartE2EDuration="1m3.486266236s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.485335716 +0000 UTC m=+89.239607542" watchObservedRunningTime="2025-09-30 17:00:42.486266236 +0000 UTC m=+89.240538112"
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.499831 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-j579t" podStartSLOduration=63.499805145 podStartE2EDuration="1m3.499805145s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.498987769 +0000 UTC m=+89.253259595" watchObservedRunningTime="2025-09-30 17:00:42.499805145 +0000 UTC m=+89.254076991"
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.534955 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8"
Sep 30 17:00:42 crc kubenswrapper[4818]: I0930 17:00:42.619243 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-gd5fd" podStartSLOduration=64.61921871 podStartE2EDuration="1m4.61921871s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:42.618218277 +0000 UTC m=+89.372490093" watchObservedRunningTime="2025-09-30 17:00:42.61921871 +0000 UTC m=+89.373490526"
Sep 30 17:00:43 crc kubenswrapper[4818]: I0930 17:00:43.020408 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:43 crc kubenswrapper[4818]: E0930 17:00:43.020976 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:43 crc kubenswrapper[4818]: I0930 17:00:43.560961 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" event={"ID":"89bfa286-382d-4fd8-995d-70fbd141f8ec","Type":"ContainerStarted","Data":"4d52383e45ef9699ed6437c8c210f4209aa8aa48dd23f03a74ff50333fa503a7"}
Sep 30 17:00:43 crc kubenswrapper[4818]: I0930 17:00:43.561021 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" event={"ID":"89bfa286-382d-4fd8-995d-70fbd141f8ec","Type":"ContainerStarted","Data":"aed7aaa284588cbbcefe42cb7c12b4e3432c3763f1ff8f52e6542ad34443be91"}
Sep 30 17:00:43 crc kubenswrapper[4818]: I0930 17:00:43.578517 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfpk8" podStartSLOduration=64.578480873 podStartE2EDuration="1m4.578480873s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:43.577497491 +0000 UTC m=+90.331769317" watchObservedRunningTime="2025-09-30 17:00:43.578480873 +0000 UTC m=+90.332752729"
Sep 30 17:00:44 crc kubenswrapper[4818]: I0930 17:00:44.019471 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:44 crc kubenswrapper[4818]: I0930 17:00:44.019519 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:44 crc kubenswrapper[4818]: I0930 17:00:44.019499 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
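The pod_startup_latency_tracker.go:104 entries report two figures: podStartE2EDuration (pod creation to observed running) and podStartSLOduration, which discounts image-pull time. With firstStartedPulling/lastFinishedPulling at the zero time (images already on the node), the two coincide, as every entry above shows. A sketch of the arithmetic using the node-resolver-gd5fd numbers (an illustration of the reported values, not the tracker's source):

package main

import (
	"fmt"
	"time"
)

func main() {
	created := time.Date(2025, 9, 30, 16, 59, 38, 0, time.UTC)
	observedRunning := time.Date(2025, 9, 30, 17, 0, 42, 619218710, time.UTC)

	// No image pull was recorded (pull timestamps are the zero value in the
	// log), so SLO duration and end-to-end duration are the same number.
	var pullTime time.Duration
	e2e := observedRunning.Sub(created)
	slo := e2e - pullTime

	fmt.Println(e2e, slo) // 1m4.61921871s, matching podStartSLOduration=64.61921871
}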
Sep 30 17:00:44 crc kubenswrapper[4818]: E0930 17:00:44.021137 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:44 crc kubenswrapper[4818]: E0930 17:00:44.021222 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:44 crc kubenswrapper[4818]: E0930 17:00:44.021374 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:45 crc kubenswrapper[4818]: I0930 17:00:45.019963 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:45 crc kubenswrapper[4818]: E0930 17:00:45.020494 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:45 crc kubenswrapper[4818]: I0930 17:00:45.033135 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Sep 30 17:00:46 crc kubenswrapper[4818]: I0930 17:00:46.019963 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:46 crc kubenswrapper[4818]: E0930 17:00:46.020115 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:46 crc kubenswrapper[4818]: I0930 17:00:46.020258 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:46 crc kubenswrapper[4818]: I0930 17:00:46.020352 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:46 crc kubenswrapper[4818]: E0930 17:00:46.020486 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:46 crc kubenswrapper[4818]: E0930 17:00:46.020578 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:46 crc kubenswrapper[4818]: I0930 17:00:46.021595 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"
Sep 30 17:00:46 crc kubenswrapper[4818]: E0930 17:00:46.021832 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"
Sep 30 17:00:47 crc kubenswrapper[4818]: I0930 17:00:47.019711 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:47 crc kubenswrapper[4818]: E0930 17:00:47.019873 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:48 crc kubenswrapper[4818]: I0930 17:00:48.019770 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:48 crc kubenswrapper[4818]: I0930 17:00:48.019918 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:48 crc kubenswrapper[4818]: E0930 17:00:48.020005 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:48 crc kubenswrapper[4818]: I0930 17:00:48.019785 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
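The CrashLoopBackOff entry above shows the kubelet refusing to restart ovnkube-controller for 40s. Kubelet restart back-off doubles per crash up to a cap; 10s initial and 5m cap are the upstream defaults, taken here as an assumption. A sketch of how the 40s figure falls out:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Kubelet-style crash-loop back-off: the delay doubles per restart
	// attempt, capped at a maximum (assumed defaults: 10s initial, 5m cap).
	const initial = 10 * time.Second
	const maxDelay = 5 * time.Minute
	delay := initial
	for restart := 1; restart <= 6; restart++ {
		fmt.Printf("restart %d: back-off %s\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	// The third restart lands on the 40s back-off seen for ovnkube-controller.
}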
Sep 30 17:00:48 crc kubenswrapper[4818]: E0930 17:00:48.020314 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:48 crc kubenswrapper[4818]: E0930 17:00:48.020370 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:49 crc kubenswrapper[4818]: I0930 17:00:49.019995 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:49 crc kubenswrapper[4818]: E0930 17:00:49.021071 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:50 crc kubenswrapper[4818]: I0930 17:00:50.020296 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:50 crc kubenswrapper[4818]: E0930 17:00:50.020537 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:50 crc kubenswrapper[4818]: I0930 17:00:50.020333 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:50 crc kubenswrapper[4818]: I0930 17:00:50.020332 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:50 crc kubenswrapper[4818]: E0930 17:00:50.020705 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:50 crc kubenswrapper[4818]: E0930 17:00:50.020866 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:51 crc kubenswrapper[4818]: I0930 17:00:51.020188 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:51 crc kubenswrapper[4818]: E0930 17:00:51.020388 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:52 crc kubenswrapper[4818]: I0930 17:00:52.020029 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:52 crc kubenswrapper[4818]: I0930 17:00:52.020127 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:52 crc kubenswrapper[4818]: E0930 17:00:52.020310 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:52 crc kubenswrapper[4818]: I0930 17:00:52.020367 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:52 crc kubenswrapper[4818]: E0930 17:00:52.020737 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:52 crc kubenswrapper[4818]: E0930 17:00:52.020829 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:53 crc kubenswrapper[4818]: I0930 17:00:53.019961 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:53 crc kubenswrapper[4818]: E0930 17:00:53.020189 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:54 crc kubenswrapper[4818]: I0930 17:00:54.020147 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:54 crc kubenswrapper[4818]: I0930 17:00:54.020188 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:54 crc kubenswrapper[4818]: I0930 17:00:54.020257 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:54 crc kubenswrapper[4818]: E0930 17:00:54.022256 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:54 crc kubenswrapper[4818]: E0930 17:00:54.022542 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:54 crc kubenswrapper[4818]: E0930 17:00:54.022740 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:54 crc kubenswrapper[4818]: I0930 17:00:54.042900 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=9.042874681 podStartE2EDuration="9.042874681s" podCreationTimestamp="2025-09-30 17:00:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:00:54.0409753 +0000 UTC m=+100.795247126" watchObservedRunningTime="2025-09-30 17:00:54.042874681 +0000 UTC m=+100.797146537"
Sep 30 17:00:55 crc kubenswrapper[4818]: I0930 17:00:55.020362 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:55 crc kubenswrapper[4818]: E0930 17:00:55.020535 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:56 crc kubenswrapper[4818]: I0930 17:00:56.020031 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:56 crc kubenswrapper[4818]: I0930 17:00:56.020071 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:56 crc kubenswrapper[4818]: E0930 17:00:56.020198 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:56 crc kubenswrapper[4818]: I0930 17:00:56.020038 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:56 crc kubenswrapper[4818]: E0930 17:00:56.020381 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:56 crc kubenswrapper[4818]: E0930 17:00:56.020506 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:57 crc kubenswrapper[4818]: I0930 17:00:57.019606 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:00:57 crc kubenswrapper[4818]: E0930 17:00:57.020228 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:00:58 crc kubenswrapper[4818]: I0930 17:00:58.020539 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:58 crc kubenswrapper[4818]: I0930 17:00:58.020594 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:00:58 crc kubenswrapper[4818]: I0930 17:00:58.020554 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:00:58 crc kubenswrapper[4818]: E0930 17:00:58.020889 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:00:58 crc kubenswrapper[4818]: E0930 17:00:58.021340 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:00:58 crc kubenswrapper[4818]: E0930 17:00:58.021491 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:00:58 crc kubenswrapper[4818]: I0930 17:00:58.492631 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:00:58 crc kubenswrapper[4818]: E0930 17:00:58.492993 4818 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Sep 30 17:00:58 crc kubenswrapper[4818]: E0930 17:00:58.493069 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs podName:3712d08f-58c2-4fff-9d9f-443ba37fc9c0 nodeName:}" failed. No retries permitted until 2025-09-30 17:02:02.493047196 +0000 UTC m=+169.247319042 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs") pod "network-metrics-daemon-4p4hg" (UID: "3712d08f-58c2-4fff-9d9f-443ba37fc9c0") : object "openshift-multus"/"metrics-daemon-secret" not registered
Sep 30 17:00:59 crc kubenswrapper[4818]: I0930 17:00:59.019443 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
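The nestedpendingoperations.go:348 entry above schedules the failed metrics-certs mount for retry in 1m4s. Failed volume operations are retried with exponentially growing delays; 64s is consistent with a doubling series from a 500ms base (the base value is an assumption, not stated in the log), as sketched here:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Exponential retry delay for a repeatedly failing volume mount,
	// doubling per failure. Assuming a 500ms base, the eighth failure
	// yields the 1m4s durationBeforeRetry reported for metrics-certs.
	delay := 500 * time.Millisecond
	for failure := 1; failure <= 8; failure++ {
		fmt.Printf("failure %d: next retry in %s\n", failure, delay)
		delay *= 2
	}
}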
Sep 30 17:00:59 crc kubenswrapper[4818]: E0930 17:00:59.020011 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:01:00 crc kubenswrapper[4818]: I0930 17:01:00.020461 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:01:00 crc kubenswrapper[4818]: I0930 17:01:00.020854 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:01:00 crc kubenswrapper[4818]: E0930 17:01:00.021360 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:01:00 crc kubenswrapper[4818]: E0930 17:01:00.021663 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:00 crc kubenswrapper[4818]: I0930 17:01:00.021962 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:01:00 crc kubenswrapper[4818]: E0930 17:01:00.022186 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:01:00 crc kubenswrapper[4818]: I0930 17:01:00.041988 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Sep 30 17:01:01 crc kubenswrapper[4818]: I0930 17:01:01.020028 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:01:01 crc kubenswrapper[4818]: E0930 17:01:01.020441 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:01:01 crc kubenswrapper[4818]: I0930 17:01:01.020763 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"
Sep 30 17:01:01 crc kubenswrapper[4818]: E0930 17:01:01.021016 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-ljmfd_openshift-ovn-kubernetes(68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"
Sep 30 17:01:02 crc kubenswrapper[4818]: I0930 17:01:02.020463 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:01:02 crc kubenswrapper[4818]: I0930 17:01:02.020527 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:01:02 crc kubenswrapper[4818]: I0930 17:01:02.020493 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:01:02 crc kubenswrapper[4818]: E0930 17:01:02.020720 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:01:02 crc kubenswrapper[4818]: E0930 17:01:02.020869 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:02 crc kubenswrapper[4818]: E0930 17:01:02.021061 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:01:03 crc kubenswrapper[4818]: I0930 17:01:03.020291 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:01:03 crc kubenswrapper[4818]: E0930 17:01:03.021263 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:01:04 crc kubenswrapper[4818]: I0930 17:01:04.020069 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:01:04 crc kubenswrapper[4818]: I0930 17:01:04.020267 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:01:04 crc kubenswrapper[4818]: E0930 17:01:04.022288 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:01:04 crc kubenswrapper[4818]: I0930 17:01:04.022375 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:01:04 crc kubenswrapper[4818]: E0930 17:01:04.022478 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:01:04 crc kubenswrapper[4818]: E0930 17:01:04.022591 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:04 crc kubenswrapper[4818]: I0930 17:01:04.065086 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=4.065037605 podStartE2EDuration="4.065037605s" podCreationTimestamp="2025-09-30 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:04.061720226 +0000 UTC m=+110.815992042" watchObservedRunningTime="2025-09-30 17:01:04.065037605 +0000 UTC m=+110.819309431"
Sep 30 17:01:05 crc kubenswrapper[4818]: I0930 17:01:05.020247 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:01:05 crc kubenswrapper[4818]: E0930 17:01:05.020877 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:01:06 crc kubenswrapper[4818]: I0930 17:01:06.019378 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:01:06 crc kubenswrapper[4818]: I0930 17:01:06.019417 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:01:06 crc kubenswrapper[4818]: E0930 17:01:06.019520 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:01:06 crc kubenswrapper[4818]: I0930 17:01:06.019676 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:01:06 crc kubenswrapper[4818]: E0930 17:01:06.019799 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:06 crc kubenswrapper[4818]: E0930 17:01:06.019906 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:01:07 crc kubenswrapper[4818]: I0930 17:01:07.019791 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:01:07 crc kubenswrapper[4818]: E0930 17:01:07.019952 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:01:08 crc kubenswrapper[4818]: I0930 17:01:08.020772 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:01:08 crc kubenswrapper[4818]: E0930 17:01:08.022300 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:01:08 crc kubenswrapper[4818]: I0930 17:01:08.020981 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
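Every NetworkPluginNotReady line in this log reduces to one condition: the container runtime finds no network configuration under /etc/kubernetes/cni/net.d/. A standalone sketch of that directory check using plain os calls (an approximation of the idea, not CRI-O's actual config loader):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // the directory named in the log
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	var confs []string
	for _, e := range entries {
		// Runtimes conventionally accept .conf/.conflist (and historically
		// .json) files here; treated as an assumption in this sketch.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			confs = append(confs, e.Name())
		}
	}
	if len(confs) == 0 {
		// The condition the runtime keeps reporting above.
		fmt.Println("no CNI configuration file in", dir)
		return
	}
	fmt.Println("CNI configs:", confs)
}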
Sep 30 17:01:08 crc kubenswrapper[4818]: E0930 17:01:08.022897 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:08 crc kubenswrapper[4818]: I0930 17:01:08.020164 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:01:08 crc kubenswrapper[4818]: E0930 17:01:08.023902 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:01:09 crc kubenswrapper[4818]: I0930 17:01:09.020037 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:01:09 crc kubenswrapper[4818]: E0930 17:01:09.020475 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:01:10 crc kubenswrapper[4818]: I0930 17:01:10.020505 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:01:10 crc kubenswrapper[4818]: I0930 17:01:10.020501 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:01:10 crc kubenswrapper[4818]: I0930 17:01:10.020523 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:01:10 crc kubenswrapper[4818]: E0930 17:01:10.021261 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:01:10 crc kubenswrapper[4818]: E0930 17:01:10.021413 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:01:10 crc kubenswrapper[4818]: E0930 17:01:10.021518 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:11 crc kubenswrapper[4818]: I0930 17:01:11.020022 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:01:11 crc kubenswrapper[4818]: E0930 17:01:11.020165 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.020332 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.020509 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.021181 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 30 17:01:12 crc kubenswrapper[4818]: E0930 17:01:12.021556 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 30 17:01:12 crc kubenswrapper[4818]: E0930 17:01:12.021704 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:12 crc kubenswrapper[4818]: E0930 17:01:12.021855 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.023115 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.667367 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/3.log"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.670457 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerStarted","Data":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"}
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.670901 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.931495 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podStartSLOduration=93.931466655 podStartE2EDuration="1m33.931466655s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:12.703128444 +0000 UTC m=+119.457400290" watchObservedRunningTime="2025-09-30 17:01:12.931466655 +0000 UTC m=+119.685738501"
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.933051 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4p4hg"]
Sep 30 17:01:12 crc kubenswrapper[4818]: I0930 17:01:12.933171 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg"
Sep 30 17:01:12 crc kubenswrapper[4818]: E0930 17:01:12.933313 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0"
Sep 30 17:01:13 crc kubenswrapper[4818]: I0930 17:01:13.019894 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 30 17:01:13 crc kubenswrapper[4818]: E0930 17:01:13.020087 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:13 crc kubenswrapper[4818]: I0930 17:01:13.675237 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/1.log" Sep 30 17:01:13 crc kubenswrapper[4818]: I0930 17:01:13.675744 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/0.log" Sep 30 17:01:13 crc kubenswrapper[4818]: I0930 17:01:13.675798 4818 generic.go:334] "Generic (PLEG): container finished" podID="d36fce8a-ff27-48bf-be9c-67fc2046136d" containerID="9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35" exitCode=1 Sep 30 17:01:13 crc kubenswrapper[4818]: I0930 17:01:13.675912 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerDied","Data":"9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35"} Sep 30 17:01:13 crc kubenswrapper[4818]: I0930 17:01:13.675994 4818 scope.go:117] "RemoveContainer" containerID="dfc0f4bb33189a7a0daa540b415f3e8e6b686e2ee6d48c7e600f9796acb117d6" Sep 30 17:01:13 crc kubenswrapper[4818]: I0930 17:01:13.676695 4818 scope.go:117] "RemoveContainer" containerID="9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35" Sep 30 17:01:13 crc kubenswrapper[4818]: E0930 17:01:13.676870 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-hq6j2_openshift-multus(d36fce8a-ff27-48bf-be9c-67fc2046136d)\"" pod="openshift-multus/multus-hq6j2" podUID="d36fce8a-ff27-48bf-be9c-67fc2046136d" Sep 30 17:01:14 crc kubenswrapper[4818]: E0930 17:01:14.014200 4818 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Sep 30 17:01:14 crc kubenswrapper[4818]: I0930 17:01:14.019699 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:14 crc kubenswrapper[4818]: I0930 17:01:14.019847 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:14 crc kubenswrapper[4818]: E0930 17:01:14.021410 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:14 crc kubenswrapper[4818]: E0930 17:01:14.021656 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:14 crc kubenswrapper[4818]: E0930 17:01:14.158413 4818 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:01:14 crc kubenswrapper[4818]: I0930 17:01:14.682599 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/1.log" Sep 30 17:01:15 crc kubenswrapper[4818]: I0930 17:01:15.019638 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:15 crc kubenswrapper[4818]: I0930 17:01:15.019696 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:15 crc kubenswrapper[4818]: E0930 17:01:15.019893 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:15 crc kubenswrapper[4818]: E0930 17:01:15.020034 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:16 crc kubenswrapper[4818]: I0930 17:01:16.020126 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:16 crc kubenswrapper[4818]: I0930 17:01:16.020282 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:16 crc kubenswrapper[4818]: E0930 17:01:16.020547 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:16 crc kubenswrapper[4818]: E0930 17:01:16.020308 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:17 crc kubenswrapper[4818]: I0930 17:01:17.020470 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:17 crc kubenswrapper[4818]: E0930 17:01:17.020676 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:17 crc kubenswrapper[4818]: I0930 17:01:17.020490 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:17 crc kubenswrapper[4818]: E0930 17:01:17.021076 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:18 crc kubenswrapper[4818]: I0930 17:01:18.020343 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:18 crc kubenswrapper[4818]: E0930 17:01:18.020515 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:18 crc kubenswrapper[4818]: I0930 17:01:18.020372 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:18 crc kubenswrapper[4818]: E0930 17:01:18.020876 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:19 crc kubenswrapper[4818]: I0930 17:01:19.019995 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:19 crc kubenswrapper[4818]: E0930 17:01:19.020449 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:19 crc kubenswrapper[4818]: I0930 17:01:19.020047 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:19 crc kubenswrapper[4818]: E0930 17:01:19.020759 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:19 crc kubenswrapper[4818]: E0930 17:01:19.160537 4818 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:01:20 crc kubenswrapper[4818]: I0930 17:01:20.020254 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:20 crc kubenswrapper[4818]: I0930 17:01:20.020355 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:20 crc kubenswrapper[4818]: E0930 17:01:20.020441 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:20 crc kubenswrapper[4818]: E0930 17:01:20.020539 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:21 crc kubenswrapper[4818]: I0930 17:01:21.019501 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:21 crc kubenswrapper[4818]: E0930 17:01:21.019615 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:21 crc kubenswrapper[4818]: I0930 17:01:21.019507 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:21 crc kubenswrapper[4818]: E0930 17:01:21.019784 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:22 crc kubenswrapper[4818]: I0930 17:01:22.020251 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:22 crc kubenswrapper[4818]: I0930 17:01:22.020375 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:22 crc kubenswrapper[4818]: E0930 17:01:22.020438 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:22 crc kubenswrapper[4818]: E0930 17:01:22.020566 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:23 crc kubenswrapper[4818]: I0930 17:01:23.020421 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:23 crc kubenswrapper[4818]: I0930 17:01:23.020421 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:23 crc kubenswrapper[4818]: E0930 17:01:23.020613 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:23 crc kubenswrapper[4818]: E0930 17:01:23.020850 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:24 crc kubenswrapper[4818]: I0930 17:01:24.019375 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:24 crc kubenswrapper[4818]: I0930 17:01:24.019391 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:24 crc kubenswrapper[4818]: E0930 17:01:24.021615 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:24 crc kubenswrapper[4818]: E0930 17:01:24.021741 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:24 crc kubenswrapper[4818]: E0930 17:01:24.161704 4818 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:01:25 crc kubenswrapper[4818]: I0930 17:01:25.019516 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:25 crc kubenswrapper[4818]: I0930 17:01:25.019629 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:25 crc kubenswrapper[4818]: E0930 17:01:25.020411 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:25 crc kubenswrapper[4818]: E0930 17:01:25.021320 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:26 crc kubenswrapper[4818]: I0930 17:01:26.020004 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:26 crc kubenswrapper[4818]: E0930 17:01:26.020730 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:26 crc kubenswrapper[4818]: I0930 17:01:26.019988 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:26 crc kubenswrapper[4818]: E0930 17:01:26.020853 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:27 crc kubenswrapper[4818]: I0930 17:01:27.019784 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:27 crc kubenswrapper[4818]: I0930 17:01:27.019848 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:27 crc kubenswrapper[4818]: E0930 17:01:27.021104 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:27 crc kubenswrapper[4818]: E0930 17:01:27.021284 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:28 crc kubenswrapper[4818]: I0930 17:01:28.019506 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:28 crc kubenswrapper[4818]: I0930 17:01:28.019506 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:28 crc kubenswrapper[4818]: E0930 17:01:28.019689 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:28 crc kubenswrapper[4818]: E0930 17:01:28.019824 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:28 crc kubenswrapper[4818]: I0930 17:01:28.020333 4818 scope.go:117] "RemoveContainer" containerID="9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35" Sep 30 17:01:28 crc kubenswrapper[4818]: I0930 17:01:28.742067 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/1.log" Sep 30 17:01:28 crc kubenswrapper[4818]: I0930 17:01:28.742647 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerStarted","Data":"16f00b29df5ca66c8eb980b856c8659e891bb2ee5eec8c4baf8196533a20321b"} Sep 30 17:01:29 crc kubenswrapper[4818]: I0930 17:01:29.020436 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:29 crc kubenswrapper[4818]: I0930 17:01:29.020505 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:29 crc kubenswrapper[4818]: E0930 17:01:29.020661 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:29 crc kubenswrapper[4818]: E0930 17:01:29.021147 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:29 crc kubenswrapper[4818]: E0930 17:01:29.163773 4818 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:01:30 crc kubenswrapper[4818]: I0930 17:01:30.020164 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:30 crc kubenswrapper[4818]: I0930 17:01:30.020222 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:30 crc kubenswrapper[4818]: E0930 17:01:30.020339 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:30 crc kubenswrapper[4818]: E0930 17:01:30.020801 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:31 crc kubenswrapper[4818]: I0930 17:01:31.011461 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 17:01:31 crc kubenswrapper[4818]: I0930 17:01:31.019429 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:31 crc kubenswrapper[4818]: I0930 17:01:31.019545 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:31 crc kubenswrapper[4818]: E0930 17:01:31.019629 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:31 crc kubenswrapper[4818]: E0930 17:01:31.019728 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:32 crc kubenswrapper[4818]: I0930 17:01:32.020138 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:32 crc kubenswrapper[4818]: I0930 17:01:32.020209 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:32 crc kubenswrapper[4818]: E0930 17:01:32.020322 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:32 crc kubenswrapper[4818]: E0930 17:01:32.020424 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:33 crc kubenswrapper[4818]: I0930 17:01:33.019793 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:33 crc kubenswrapper[4818]: I0930 17:01:33.019984 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:33 crc kubenswrapper[4818]: E0930 17:01:33.020012 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 30 17:01:33 crc kubenswrapper[4818]: E0930 17:01:33.020285 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4p4hg" podUID="3712d08f-58c2-4fff-9d9f-443ba37fc9c0" Sep 30 17:01:34 crc kubenswrapper[4818]: I0930 17:01:34.020285 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:34 crc kubenswrapper[4818]: I0930 17:01:34.020295 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:34 crc kubenswrapper[4818]: E0930 17:01:34.022735 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 30 17:01:34 crc kubenswrapper[4818]: E0930 17:01:34.023030 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 30 17:01:35 crc kubenswrapper[4818]: I0930 17:01:35.019591 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:35 crc kubenswrapper[4818]: I0930 17:01:35.019609 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:01:35 crc kubenswrapper[4818]: I0930 17:01:35.023617 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Sep 30 17:01:35 crc kubenswrapper[4818]: I0930 17:01:35.023792 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Sep 30 17:01:35 crc kubenswrapper[4818]: I0930 17:01:35.024010 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Sep 30 17:01:35 crc kubenswrapper[4818]: I0930 17:01:35.025535 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Sep 30 17:01:36 crc kubenswrapper[4818]: I0930 17:01:36.019793 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:36 crc kubenswrapper[4818]: I0930 17:01:36.019890 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:36 crc kubenswrapper[4818]: I0930 17:01:36.022904 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Sep 30 17:01:36 crc kubenswrapper[4818]: I0930 17:01:36.024704 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.006010 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:42 crc kubenswrapper[4818]: E0930 17:01:42.006132 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:03:44.006103202 +0000 UTC m=+270.760375018 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.006290 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.006369 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.007453 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.015606 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.045187 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.107990 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.108058 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.112699 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.113589 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.245068 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.359367 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:42 crc kubenswrapper[4818]: W0930 17:01:42.420865 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-1e30dbe1cf961de660b40e935eab813fbc0162e909155039b2ad4ad098d4d9c3 WatchSource:0}: Error finding container 1e30dbe1cf961de660b40e935eab813fbc0162e909155039b2ad4ad098d4d9c3: Status 404 returned error can't find the container with id 1e30dbe1cf961de660b40e935eab813fbc0162e909155039b2ad4ad098d4d9c3 Sep 30 17:01:42 crc kubenswrapper[4818]: W0930 17:01:42.549476 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-7af3226e354a04d109ab1df8c3927f26ca5b9297ff611ea9eaada6a3a805d299 WatchSource:0}: Error finding container 7af3226e354a04d109ab1df8c3927f26ca5b9297ff611ea9eaada6a3a805d299: Status 404 returned error can't find the container with id 7af3226e354a04d109ab1df8c3927f26ca5b9297ff611ea9eaada6a3a805d299 Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.804380 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"299c030d8ef0556dd619d79060d89b06a0bcfe8a1e10b3a77b617e78d2c5f4f0"} Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.804840 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"7af3226e354a04d109ab1df8c3927f26ca5b9297ff611ea9eaada6a3a805d299"} Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.805182 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.809761 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"abd32375e5fd842b629bdb963d14be6b441f3919c14a7738cea72f897c5111ca"} Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.809844 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"2c35d5bd383d62058f88a10e1a852ab47ffa67f7a0bd1756ee263945d349945c"} Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.813475 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"21aadcf4c5dff89515a5d37dd9273cd98d4d5444d8e83291191ee6db4823a197"} Sep 30 17:01:42 crc kubenswrapper[4818]: I0930 17:01:42.813549 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"1e30dbe1cf961de660b40e935eab813fbc0162e909155039b2ad4ad098d4d9c3"} Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.673038 4818 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeReady" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.731797 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-qv4dz"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.732362 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.733317 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.733813 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.744415 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.746630 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.746680 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.746689 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.747086 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.747278 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.747302 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.747304 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.747500 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.747915 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.748008 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.748103 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.748298 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.748387 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.748415 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.748454 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" 
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.749696 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.750577 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.750719 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xkh6t"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.751172 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.752773 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.753266 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.753551 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.758587 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.758735 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.760015 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.760522 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.768761 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.769063 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.769222 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.769391 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.770271 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.771051 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.771334 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-m7rls"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.771663 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-m7rls" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.772980 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.773264 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.777326 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.777605 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.777709 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.777851 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.777948 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778012 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778122 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778131 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778165 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778321 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778404 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778453 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778495 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.778578 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Sep 30 
17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.783986 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7r27t"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.784499 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.784905 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.785424 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7r27t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.789791 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.790619 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.811112 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.811321 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.820488 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.821442 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-wr9kd"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.821851 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.828126 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.828326 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.830999 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840695 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5ms7\" (UniqueName: \"kubernetes.io/projected/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-kube-api-access-r5ms7\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840758 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j4mr\" (UniqueName: \"kubernetes.io/projected/8b00b5de-92e6-45ef-bd66-2f06b0b0e249-kube-api-access-2j4mr\") pod \"downloads-7954f5f757-7r27t\" (UID: \"8b00b5de-92e6-45ef-bd66-2f06b0b0e249\") " pod="openshift-console/downloads-7954f5f757-7r27t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840794 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bpth\" (UniqueName: \"kubernetes.io/projected/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-kube-api-access-6bpth\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840822 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bb9049c-5f9a-4260-bd40-3140669d6701-config\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840853 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-client-ca\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840883 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-encryption-config\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840914 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a07c676e-64cc-4962-8d9a-251ab4c93d60-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840961 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-oauth-config\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.840991 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-auth-proxy-config\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841041 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-audit\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841068 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-config\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841094 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4bb9049c-5f9a-4260-bd40-3140669d6701-images\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841117 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7a2ab63-3622-49c3-abef-fc6ff98758e4-serving-cert\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841145 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4bb9049c-5f9a-4260-bd40-3140669d6701-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841198 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8scfl\" (UniqueName: \"kubernetes.io/projected/7026c744-378e-4b55-b738-e295be19ef2a-kube-api-access-8scfl\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841228 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-config\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841248 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-etcd-client\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841277 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841309 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwr9g\" (UniqueName: \"kubernetes.io/projected/dde52314-bdf5-4c51-90e2-a258a21ec712-kube-api-access-dwr9g\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841339 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a07c676e-64cc-4962-8d9a-251ab4c93d60-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841371 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-oauth-serving-cert\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841396 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drw4c\" (UniqueName: \"kubernetes.io/projected/95508e9a-38fe-4c90-83ee-1b87733b07a6-kube-api-access-drw4c\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841424 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rk8l\" (UniqueName: \"kubernetes.io/projected/4bb9049c-5f9a-4260-bd40-3140669d6701-kube-api-access-8rk8l\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841453 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-etcd-client\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841480 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841501 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-config\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841527 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-config\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841553 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/95508e9a-38fe-4c90-83ee-1b87733b07a6-audit-dir\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841581 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-serving-cert\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841607 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsdpr\" (UniqueName: \"kubernetes.io/projected/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-kube-api-access-qsdpr\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841634 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-trusted-ca-bundle\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841658 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6qmz\" (UniqueName: \"kubernetes.io/projected/4709760d-9993-42d3-97c3-bd5470b9c8ab-kube-api-access-m6qmz\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841685 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds77c\" (UniqueName: \"kubernetes.io/projected/f7a2ab63-3622-49c3-abef-fc6ff98758e4-kube-api-access-ds77c\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841709 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-config\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841734 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dde52314-bdf5-4c51-90e2-a258a21ec712-serving-cert\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841761 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-audit-dir\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841786 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-image-import-ca\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841813 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dde52314-bdf5-4c51-90e2-a258a21ec712-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841835 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a07c676e-64cc-4962-8d9a-251ab4c93d60-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841865 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7026c744-378e-4b55-b738-e295be19ef2a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841891 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-service-ca\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.843190 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.841937 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-etcd-serving-ca\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844231 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7smv5\" (UniqueName: \"kubernetes.io/projected/a4a87b84-cb7a-4406-ac3a-473e984376a1-kube-api-access-7smv5\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844267 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844297 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-encryption-config\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844327 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-serving-cert\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844360 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-client-ca\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844386 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844411 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-serving-cert\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844441 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-machine-approver-tls\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844470 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-serving-cert\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844498 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-trusted-ca\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844525 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-audit-policies\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844549 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844579 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zckkr\" (UniqueName: \"kubernetes.io/projected/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-kube-api-access-zckkr\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844608 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4dds\" (UniqueName: \"kubernetes.io/projected/a07c676e-64cc-4962-8d9a-251ab4c93d60-kube-api-access-r4dds\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844641 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-trusted-ca-bundle\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844664 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-config\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844691 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4a87b84-cb7a-4406-ac3a-473e984376a1-serving-cert\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844748 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7026c744-378e-4b55-b738-e295be19ef2a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.844783 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/95508e9a-38fe-4c90-83ee-1b87733b07a6-node-pullsecrets\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.845106 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.846853 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.848033 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.850738 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.884107 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.884197 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.884673 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.884701 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.884721 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.884832 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.886847 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fbfxd"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.887391 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-888xq"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.887589 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.887703 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-w66fc"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.887765 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.888117 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.888494 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.888815 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.890848 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kml8f"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.891380 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.891620 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.891776 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.891867 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.892160 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.892301 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893003 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893102 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893218 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893302 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893373 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893450 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893524 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893609 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.893684 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894024 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894076 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894122 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894198 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894269 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894307 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894375 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894499 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894274 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894620 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.894624 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.895953 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.900377 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.911671 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.912174 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.912750 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.913194 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.913664 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.914759 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-gnmfp"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.915247 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-gnmfp"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.915977 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.916432 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.916518 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.917796 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.918311 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.918828 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.918839 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.921158 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.921373 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.921899 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922018 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922046 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922071 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922170 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922199 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922242 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922200 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922265 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922300 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922324 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922339 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922269 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.922553 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.931985 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-q8cn5"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.935480 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4"]
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.941754 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.957843 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.957868 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.958076 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.959353 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.960997 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961627 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-etcd-client\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961665 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961690 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-config\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961712 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/95508e9a-38fe-4c90-83ee-1b87733b07a6-audit-dir\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961733 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-config\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961753 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-serving-cert\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961775 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsdpr\" (UniqueName: \"kubernetes.io/projected/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-kube-api-access-qsdpr\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961798 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-trusted-ca-bundle\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961821 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6qmz\" (UniqueName: \"kubernetes.io/projected/4709760d-9993-42d3-97c3-bd5470b9c8ab-kube-api-access-m6qmz\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961843 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds77c\" (UniqueName: \"kubernetes.io/projected/f7a2ab63-3622-49c3-abef-fc6ff98758e4-kube-api-access-ds77c\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961849 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/95508e9a-38fe-4c90-83ee-1b87733b07a6-audit-dir\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961863 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-config\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961884 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-audit-dir\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961905 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dde52314-bdf5-4c51-90e2-a258a21ec712-serving-cert\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961947 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-image-import-ca\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961970 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dde52314-bdf5-4c51-90e2-a258a21ec712-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.961993 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a07c676e-64cc-4962-8d9a-251ab4c93d60-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962015 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-service-ca\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962038 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7026c744-378e-4b55-b738-e295be19ef2a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962057 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-etcd-serving-ca\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962076 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7smv5\" (UniqueName: \"kubernetes.io/projected/a4a87b84-cb7a-4406-ac3a-473e984376a1-kube-api-access-7smv5\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962097 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962118 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-encryption-config\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962138 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-serving-cert\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962162 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-client-ca\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962186 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962207 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-serving-cert\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962227 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-machine-approver-tls\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962249 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-serving-cert\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962270 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-audit-policies\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962289 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962312 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zckkr\" (UniqueName: \"kubernetes.io/projected/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-kube-api-access-zckkr\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962334 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-trusted-ca\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962357 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4dds\" (UniqueName: \"kubernetes.io/projected/a07c676e-64cc-4962-8d9a-251ab4c93d60-kube-api-access-r4dds\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962378 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-trusted-ca-bundle\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962397 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-config\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962417 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4a87b84-cb7a-4406-ac3a-473e984376a1-serving-cert\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962438 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7026c744-378e-4b55-b738-e295be19ef2a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962463 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/95508e9a-38fe-4c90-83ee-1b87733b07a6-node-pullsecrets\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962494 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5ms7\" (UniqueName: \"kubernetes.io/projected/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-kube-api-access-r5ms7\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962517 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j4mr\" (UniqueName: \"kubernetes.io/projected/8b00b5de-92e6-45ef-bd66-2f06b0b0e249-kube-api-access-2j4mr\") pod \"downloads-7954f5f757-7r27t\" (UID: \"8b00b5de-92e6-45ef-bd66-2f06b0b0e249\") " pod="openshift-console/downloads-7954f5f757-7r27t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962540 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bpth\" (UniqueName: \"kubernetes.io/projected/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-kube-api-access-6bpth\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962561 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bb9049c-5f9a-4260-bd40-3140669d6701-config\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962585 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-client-ca\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962606 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-encryption-config\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962627 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a07c676e-64cc-4962-8d9a-251ab4c93d60-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962648 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-oauth-config\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962673 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-auth-proxy-config\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962695 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-audit\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962716 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-config\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962737 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4bb9049c-5f9a-4260-bd40-3140669d6701-images\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962758 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4bb9049c-5f9a-4260-bd40-3140669d6701-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962778 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8scfl\" (UniqueName: \"kubernetes.io/projected/7026c744-378e-4b55-b738-e295be19ef2a-kube-api-access-8scfl\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962799 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7a2ab63-3622-49c3-abef-fc6ff98758e4-serving-cert\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962816 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-etcd-client\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962835 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962850 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-config\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962867 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwr9g\" (UniqueName: \"kubernetes.io/projected/dde52314-bdf5-4c51-90e2-a258a21ec712-kube-api-access-dwr9g\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962882 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a07c676e-64cc-4962-8d9a-251ab4c93d60-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962900 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-oauth-serving-cert\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962938 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drw4c\" (UniqueName: \"kubernetes.io/projected/95508e9a-38fe-4c90-83ee-1b87733b07a6-kube-api-access-drw4c\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.962961 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rk8l\" (UniqueName: \"kubernetes.io/projected/4bb9049c-5f9a-4260-bd40-3140669d6701-kube-api-access-8rk8l\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"
Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.963701 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-config\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.964670 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-etcd-serving-ca\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.964820 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-auth-proxy-config\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.964850 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.965269 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.965514 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-trusted-ca-bundle\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.965735 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-trusted-ca-bundle\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.966623 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-config\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.966677 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-audit-dir\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 
17:01:43.967492 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bb9049c-5f9a-4260-bd40-3140669d6701-config\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.968999 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-client-ca\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.970642 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-config\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.971334 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-encryption-config\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.971602 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-config\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.971911 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/95508e9a-38fe-4c90-83ee-1b87733b07a6-node-pullsecrets\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.973803 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-etcd-client\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.989509 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-client-ca\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.974419 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-serving-cert\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: 
I0930 17:01:43.975401 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4a87b84-cb7a-4406-ac3a-473e984376a1-serving-cert\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.975425 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/95508e9a-38fe-4c90-83ee-1b87733b07a6-encryption-config\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.976031 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4bb9049c-5f9a-4260-bd40-3140669d6701-images\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.976252 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.974277 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7026c744-378e-4b55-b738-e295be19ef2a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.980437 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.980637 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a07c676e-64cc-4962-8d9a-251ab4c93d60-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.981046 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-config\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.981118 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-etcd-client\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.983295 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.983713 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.984052 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.976844 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-audit\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.984439 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dde52314-bdf5-4c51-90e2-a258a21ec712-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.985581 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-config\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.986590 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-audit-policies\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.987036 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7026c744-378e-4b55-b738-e295be19ef2a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.987394 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-trusted-ca\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.987588 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-service-ca\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.988727 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a07c676e-64cc-4962-8d9a-251ab4c93d60-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.988767 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7a2ab63-3622-49c3-abef-fc6ff98758e4-serving-cert\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.988904 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-oauth-serving-cert\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.977420 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.977476 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.977508 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.979578 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.990333 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-qv4dz"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.990352 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.982552 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.984135 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.991482 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-serving-cert\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.991910 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-serving-cert\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.993112 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4bb9049c-5f9a-4260-bd40-3140669d6701-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.993394 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.994003 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-oauth-config\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.994105 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.995081 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-serving-cert\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.995372 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.995850 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/95508e9a-38fe-4c90-83ee-1b87733b07a6-image-import-ca\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.996198 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.999270 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q"] Sep 30 17:01:43 crc kubenswrapper[4818]: I0930 17:01:43.999750 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.000141 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.000217 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.000239 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8gcpn"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.000291 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.000530 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.001385 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.001677 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-mfrws"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.002137 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.006027 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.006475 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.010879 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.012113 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-tg7pk"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.012666 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.013877 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-scg48"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.014385 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-scg48" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.016494 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.016847 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.017831 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kml8f"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.018809 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.019323 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.020485 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.021228 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dde52314-bdf5-4c51-90e2-a258a21ec712-serving-cert\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.021244 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-machine-approver-tls\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.029098 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-q8cn5"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.029150 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7r27t"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.029163 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.029176 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.032860 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.032983 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.034996 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.039687 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-wr9kd"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.042901 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-kvwlg"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.053239 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-888xq"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.053379 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.054606 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.055813 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.056929 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xkh6t"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.056959 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.062357 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.062897 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.064969 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p28xv\" (UniqueName: \"kubernetes.io/projected/94a6a5c0-845a-4f60-b111-eb28393fb07c-kube-api-access-p28xv\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065025 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-default-certificate\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065065 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc 
kubenswrapper[4818]: I0930 17:01:44.065086 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7647753f-a8c4-498f-b876-7553155a6159-proxy-tls\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065101 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbllq\" (UniqueName: \"kubernetes.io/projected/7647753f-a8c4-498f-b876-7553155a6159-kube-api-access-sbllq\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065117 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgr9w\" (UniqueName: \"kubernetes.io/projected/bc973df4-6c8f-448c-a1f8-609fd7526f3e-kube-api-access-bgr9w\") pod \"cluster-samples-operator-665b6dd947-5n9lk\" (UID: \"bc973df4-6c8f-448c-a1f8-609fd7526f3e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065132 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/133ce1d2-cc16-41f9-b136-d197c20847d1-metrics-tls\") pod \"dns-operator-744455d44c-q8cn5\" (UID: \"133ce1d2-cc16-41f9-b136-d197c20847d1\") " pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065148 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7647753f-a8c4-498f-b876-7553155a6159-images\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065426 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065488 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqx8x\" (UniqueName: \"kubernetes.io/projected/d8970c17-d95f-454b-ac56-db24223ef2fc-kube-api-access-jqx8x\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065519 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065541 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065561 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065580 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-service-ca\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065601 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbrch\" (UniqueName: \"kubernetes.io/projected/0e461d8c-b64b-4a75-8bab-2056dfd16821-kube-api-access-wbrch\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065680 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sjlv\" (UniqueName: \"kubernetes.io/projected/133ce1d2-cc16-41f9-b136-d197c20847d1-kube-api-access-9sjlv\") pod \"dns-operator-744455d44c-q8cn5\" (UID: \"133ce1d2-cc16-41f9-b136-d197c20847d1\") " pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065698 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b78fa54a-7566-4b3c-ad3a-59fce0462af7-config\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065715 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe65e822-d6e5-4427-ae15-a91ce81f90a5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065732 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-router-certs\") pod 
\"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065759 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-service-ca-bundle\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065779 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065796 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b78fa54a-7566-4b3c-ad3a-59fce0462af7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065811 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-dir\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065840 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065862 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7647753f-a8c4-498f-b876-7553155a6159-auth-proxy-config\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065878 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e461d8c-b64b-4a75-8bab-2056dfd16821-serving-cert\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065961 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-proxy-tls\") 
pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065978 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/bc973df4-6c8f-448c-a1f8-609fd7526f3e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5n9lk\" (UID: \"bc973df4-6c8f-448c-a1f8-609fd7526f3e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.065993 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9hqg\" (UniqueName: \"kubernetes.io/projected/f6918a12-03b6-4867-8598-8739f036c746-kube-api-access-s9hqg\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066042 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-stats-auth\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066071 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b78fa54a-7566-4b3c-ad3a-59fce0462af7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066094 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlhtl\" (UniqueName: \"kubernetes.io/projected/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-kube-api-access-mlhtl\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066111 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ww58z\" (UniqueName: \"kubernetes.io/projected/fe65e822-d6e5-4427-ae15-a91ce81f90a5-kube-api-access-ww58z\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066125 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066165 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066182 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066202 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe65e822-d6e5-4427-ae15-a91ce81f90a5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066217 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6918a12-03b6-4867-8598-8739f036c746-serving-cert\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066233 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066249 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-metrics-certs\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066263 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066300 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-ca\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc 
kubenswrapper[4818]: I0930 17:01:44.066314 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-client\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066331 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8970c17-d95f-454b-ac56-db24223ef2fc-service-ca-bundle\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066362 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-config\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066380 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-config\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.066397 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-policies\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.069065 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-m7rls"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.072858 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.075669 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.079053 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.080826 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-scg48"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.081816 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.083981 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.084035 4818 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.084990 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.086009 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.087437 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.088458 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.089489 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.090535 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.091573 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.093027 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-kvwlg"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.094848 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.095319 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-tg7pk"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.096623 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fbfxd"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.097777 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-w66fc"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.098797 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-c494l"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.099546 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-c494l" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.100116 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-c494l"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.101221 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-mfrws"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.102299 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.103598 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.104994 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.106110 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8gcpn"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.107099 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-5lwgz"] Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.107599 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.114597 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.134826 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.156998 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.166913 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqx8x\" (UniqueName: \"kubernetes.io/projected/d8970c17-d95f-454b-ac56-db24223ef2fc-kube-api-access-jqx8x\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.166977 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167010 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167037 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167062 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-service-ca\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167086 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbrch\" (UniqueName: \"kubernetes.io/projected/0e461d8c-b64b-4a75-8bab-2056dfd16821-kube-api-access-wbrch\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167114 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b78fa54a-7566-4b3c-ad3a-59fce0462af7-config\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167138 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe65e822-d6e5-4427-ae15-a91ce81f90a5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167163 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167190 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sjlv\" (UniqueName: \"kubernetes.io/projected/133ce1d2-cc16-41f9-b136-d197c20847d1-kube-api-access-9sjlv\") pod \"dns-operator-744455d44c-q8cn5\" (UID: \"133ce1d2-cc16-41f9-b136-d197c20847d1\") " pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167220 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-service-ca-bundle\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc 
kubenswrapper[4818]: I0930 17:01:44.167251 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167277 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b78fa54a-7566-4b3c-ad3a-59fce0462af7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167302 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-dir\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167340 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167370 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7647753f-a8c4-498f-b876-7553155a6159-auth-proxy-config\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167395 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e461d8c-b64b-4a75-8bab-2056dfd16821-serving-cert\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167432 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-proxy-tls\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167458 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/bc973df4-6c8f-448c-a1f8-609fd7526f3e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5n9lk\" (UID: \"bc973df4-6c8f-448c-a1f8-609fd7526f3e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167487 4818 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-s9hqg\" (UniqueName: \"kubernetes.io/projected/f6918a12-03b6-4867-8598-8739f036c746-kube-api-access-s9hqg\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167548 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-stats-auth\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167574 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b78fa54a-7566-4b3c-ad3a-59fce0462af7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167612 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlhtl\" (UniqueName: \"kubernetes.io/projected/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-kube-api-access-mlhtl\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167640 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167672 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ww58z\" (UniqueName: \"kubernetes.io/projected/fe65e822-d6e5-4427-ae15-a91ce81f90a5-kube-api-access-ww58z\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167718 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167744 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167770 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe65e822-d6e5-4427-ae15-a91ce81f90a5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167791 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6918a12-03b6-4867-8598-8739f036c746-serving-cert\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167814 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167841 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-metrics-certs\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167863 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167896 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-ca\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167916 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-client\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.167957 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8970c17-d95f-454b-ac56-db24223ef2fc-service-ca-bundle\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168001 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-config\") pod 
\"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168025 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-config\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168050 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-policies\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168089 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p28xv\" (UniqueName: \"kubernetes.io/projected/94a6a5c0-845a-4f60-b111-eb28393fb07c-kube-api-access-p28xv\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168093 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-dir\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168111 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-default-certificate\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168227 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168271 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7647753f-a8c4-498f-b876-7553155a6159-proxy-tls\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168320 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbllq\" (UniqueName: \"kubernetes.io/projected/7647753f-a8c4-498f-b876-7553155a6159-kube-api-access-sbllq\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: 
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168356 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgr9w\" (UniqueName: \"kubernetes.io/projected/bc973df4-6c8f-448c-a1f8-609fd7526f3e-kube-api-access-bgr9w\") pod \"cluster-samples-operator-665b6dd947-5n9lk\" (UID: \"bc973df4-6c8f-448c-a1f8-609fd7526f3e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168389 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/133ce1d2-cc16-41f9-b136-d197c20847d1-metrics-tls\") pod \"dns-operator-744455d44c-q8cn5\" (UID: \"133ce1d2-cc16-41f9-b136-d197c20847d1\") " pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168428 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7647753f-a8c4-498f-b876-7553155a6159-images\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168469 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.168624 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-service-ca\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.169216 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-service-ca-bundle\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.169903 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-config\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.169997 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.170038 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.170045 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.170834 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.170946 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6918a12-03b6-4867-8598-8739f036c746-config\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.171109 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.171138 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7647753f-a8c4-498f-b876-7553155a6159-auth-proxy-config\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.171469 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.171826 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.171862 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6918a12-03b6-4867-8598-8739f036c746-serving-cert\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.172024 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-ca\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.171862 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-policies\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.173666 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e461d8c-b64b-4a75-8bab-2056dfd16821-serving-cert\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.173730 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.174373 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.174626 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.175105 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.175238 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.175693 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
\"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.176296 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/bc973df4-6c8f-448c-a1f8-609fd7526f3e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5n9lk\" (UID: \"bc973df4-6c8f-448c-a1f8-609fd7526f3e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.176301 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.178290 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0e461d8c-b64b-4a75-8bab-2056dfd16821-etcd-client\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.195964 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.200853 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b78fa54a-7566-4b3c-ad3a-59fce0462af7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.214383 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.218773 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b78fa54a-7566-4b3c-ad3a-59fce0462af7-config\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.235533 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.239038 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe65e822-d6e5-4427-ae15-a91ce81f90a5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.255778 4818 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.274837 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.285733 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe65e822-d6e5-4427-ae15-a91ce81f90a5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.295726 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.315302 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.335001 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.355162 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.363063 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-stats-auth\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.375056 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.380980 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8970c17-d95f-454b-ac56-db24223ef2fc-service-ca-bundle\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.394738 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.414706 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.423278 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-metrics-certs\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.435793 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.442106 4818 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d8970c17-d95f-454b-ac56-db24223ef2fc-default-certificate\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.455784 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.475328 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.495375 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.515718 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.524353 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-proxy-tls\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.555025 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.562109 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7647753f-a8c4-498f-b876-7553155a6159-images\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.574783 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.595519 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.603151 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7647753f-a8c4-498f-b876-7553155a6159-proxy-tls\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.617230 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.623724 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/133ce1d2-cc16-41f9-b136-d197c20847d1-metrics-tls\") pod \"dns-operator-744455d44c-q8cn5\" (UID: \"133ce1d2-cc16-41f9-b136-d197c20847d1\") " pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.634501 4818 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.655319 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.675533 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.718984 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rk8l\" (UniqueName: \"kubernetes.io/projected/4bb9049c-5f9a-4260-bd40-3140669d6701-kube-api-access-8rk8l\") pod \"machine-api-operator-5694c8668f-7rnqd\" (UID: \"4bb9049c-5f9a-4260-bd40-3140669d6701\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.742716 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4dds\" (UniqueName: \"kubernetes.io/projected/a07c676e-64cc-4962-8d9a-251ab4c93d60-kube-api-access-r4dds\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.747443 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.764667 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7smv5\" (UniqueName: \"kubernetes.io/projected/a4a87b84-cb7a-4406-ac3a-473e984376a1-kube-api-access-7smv5\") pod \"route-controller-manager-6576b87f9c-xkkwg\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.769024 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.774740 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsdpr\" (UniqueName: \"kubernetes.io/projected/3a715e0d-75ba-45d9-a213-33a6a10f4b9f-kube-api-access-qsdpr\") pod \"console-operator-58897d9998-m7rls\" (UID: \"3a715e0d-75ba-45d9-a213-33a6a10f4b9f\") " pod="openshift-console-operator/console-operator-58897d9998-m7rls" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.791333 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-m7rls" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.794274 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j4mr\" (UniqueName: \"kubernetes.io/projected/8b00b5de-92e6-45ef-bd66-2f06b0b0e249-kube-api-access-2j4mr\") pod \"downloads-7954f5f757-7r27t\" (UID: \"8b00b5de-92e6-45ef-bd66-2f06b0b0e249\") " pod="openshift-console/downloads-7954f5f757-7r27t" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.813009 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bpth\" (UniqueName: \"kubernetes.io/projected/e3243d1f-1bcb-48cf-83e2-7e6eeaf15126-kube-api-access-6bpth\") pod \"machine-approver-56656f9798-4chv9\" (UID: \"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.831234 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6qmz\" (UniqueName: \"kubernetes.io/projected/4709760d-9993-42d3-97c3-bd5470b9c8ab-kube-api-access-m6qmz\") pod \"console-f9d7485db-wr9kd\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") " pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.857567 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds77c\" (UniqueName: \"kubernetes.io/projected/f7a2ab63-3622-49c3-abef-fc6ff98758e4-kube-api-access-ds77c\") pod \"controller-manager-879f6c89f-xkh6t\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.867282 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5ms7\" (UniqueName: \"kubernetes.io/projected/28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33-kube-api-access-r5ms7\") pod \"apiserver-7bbb656c7d-bf4lg\" (UID: \"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.894876 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a07c676e-64cc-4962-8d9a-251ab4c93d60-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-p8vq2\" (UID: \"a07c676e-64cc-4962-8d9a-251ab4c93d60\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.918794 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zckkr\" (UniqueName: \"kubernetes.io/projected/3e75be35-3cb2-42b8-b16f-e434ea90c8ef-kube-api-access-zckkr\") pod \"openshift-apiserver-operator-796bbdcf4f-cgfst\" (UID: \"3e75be35-3cb2-42b8-b16f-e434ea90c8ef\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.929881 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwr9g\" (UniqueName: \"kubernetes.io/projected/dde52314-bdf5-4c51-90e2-a258a21ec712-kube-api-access-dwr9g\") pod \"openshift-config-operator-7777fb866f-lnpjt\" (UID: \"dde52314-bdf5-4c51-90e2-a258a21ec712\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.933043 4818 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7r27t"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.950161 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8scfl\" (UniqueName: \"kubernetes.io/projected/7026c744-378e-4b55-b738-e295be19ef2a-kube-api-access-8scfl\") pod \"openshift-controller-manager-operator-756b6f6bc6-x4d8s\" (UID: \"7026c744-378e-4b55-b738-e295be19ef2a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.954586 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.966462 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-7rnqd"]
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.970000 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drw4c\" (UniqueName: \"kubernetes.io/projected/95508e9a-38fe-4c90-83ee-1b87733b07a6-kube-api-access-drw4c\") pod \"apiserver-76f77b778f-qv4dz\" (UID: \"95508e9a-38fe-4c90-83ee-1b87733b07a6\") " pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.971747 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:01:44 crc kubenswrapper[4818]: W0930 17:01:44.973855 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4bb9049c_5f9a_4260_bd40_3140669d6701.slice/crio-47e11d5dd778fa1ae9f0bb56927b9aa33b4a6ccf86dd0e7d747d7cdba11c0438 WatchSource:0}: Error finding container 47e11d5dd778fa1ae9f0bb56927b9aa33b4a6ccf86dd0e7d747d7cdba11c0438: Status 404 returned error can't find the container with id 47e11d5dd778fa1ae9f0bb56927b9aa33b4a6ccf86dd0e7d747d7cdba11c0438
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.975670 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.992448 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.993264 4818 request.go:700] Waited for 1.002495976s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dolm-operator-serviceaccount-dockercfg-rq7zk&limit=500&resourceVersion=0
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.993320 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"]
Sep 30 17:01:44 crc kubenswrapper[4818]: I0930 17:01:44.996277 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.012749 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.014550 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.026929 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.030446 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-m7rls"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.038770 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: W0930 17:01:45.042147 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3243d1f_1bcb_48cf_83e2_7e6eeaf15126.slice/crio-a27ed3c8b41c56011af99bb386075a6052db64584d7f1b698b78a5d6ec355ec5 WatchSource:0}: Error finding container a27ed3c8b41c56011af99bb386075a6052db64584d7f1b698b78a5d6ec355ec5: Status 404 returned error can't find the container with id a27ed3c8b41c56011af99bb386075a6052db64584d7f1b698b78a5d6ec355ec5
Sep 30 17:01:45 crc kubenswrapper[4818]: W0930 17:01:45.047568 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a715e0d_75ba_45d9_a213_33a6a10f4b9f.slice/crio-c0a02833c4d71fbc1c2365776c47b4f123e8998ada0560ff3eaa8b894768607d WatchSource:0}: Error finding container c0a02833c4d71fbc1c2365776c47b4f123e8998ada0560ff3eaa8b894768607d: Status 404 returned error can't find the container with id c0a02833c4d71fbc1c2365776c47b4f123e8998ada0560ff3eaa8b894768607d
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.054968 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.074340 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.074816 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.108210 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.115425 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.137900 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.140146 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.140507 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7r27t"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.155946 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.164771 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.179887 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.195365 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.218565 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.218576 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.234641 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.250953 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-wr9kd"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.256260 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.267507 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.275238 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.286164 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.294585 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.314788 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.323639 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xkh6t"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.334345 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.354599 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.378175 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.381044 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.395548 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.409445 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2"]
Sep 30 17:01:45 crc kubenswrapper[4818]: W0930 17:01:45.411340 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddde52314_bdf5_4c51_90e2_a258a21ec712.slice/crio-6ce19310aa268acebbe16d209b97f4f1a9519bbbce00843a921c0ba456c6e08b WatchSource:0}: Error finding container 6ce19310aa268acebbe16d209b97f4f1a9519bbbce00843a921c0ba456c6e08b: Status 404 returned error can't find the container with id 6ce19310aa268acebbe16d209b97f4f1a9519bbbce00843a921c0ba456c6e08b
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.434676 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.438433 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.454117 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Sep 30 17:01:45 crc kubenswrapper[4818]: W0930 17:01:45.461847 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7026c744_378e_4b55_b738_e295be19ef2a.slice/crio-354f2e65d009aa088ebc5c1303515566b41f1bf9855de784390bcf2db8d47ef3 WatchSource:0}: Error finding container 354f2e65d009aa088ebc5c1303515566b41f1bf9855de784390bcf2db8d47ef3: Status 404 returned error can't find the container with id 354f2e65d009aa088ebc5c1303515566b41f1bf9855de784390bcf2db8d47ef3
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.474076 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.494903 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.514494 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-qv4dz"]
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.514610 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.535481 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: W0930 17:01:45.549866 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95508e9a_38fe_4c90_83ee_1b87733b07a6.slice/crio-f8a8ceb1182db0f1f871edf28a9225c5ad59cfc5431813c9da6b16de683086a5 WatchSource:0}: Error finding container f8a8ceb1182db0f1f871edf28a9225c5ad59cfc5431813c9da6b16de683086a5: Status 404 returned error can't find the container with id f8a8ceb1182db0f1f871edf28a9225c5ad59cfc5431813c9da6b16de683086a5
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.554640 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.574473 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.595054 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.614875 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.634577 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.654989 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.674871 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.698056 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.714949 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.735276 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.754694 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.774629 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.794907 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.814582 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.835823 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.836469 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-wr9kd" event={"ID":"4709760d-9993-42d3-97c3-bd5470b9c8ab","Type":"ContainerStarted","Data":"ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.836510 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-wr9kd" event={"ID":"4709760d-9993-42d3-97c3-bd5470b9c8ab","Type":"ContainerStarted","Data":"98e66d2e996d2d1fe2de5ba492a09639c6dfa5ba0425ab1d47f32815ad7abf70"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.837976 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s" event={"ID":"7026c744-378e-4b55-b738-e295be19ef2a","Type":"ContainerStarted","Data":"079c53dd1e5d8fd42dfa1288f70674b12e6211a81cfdd3c1308b15681753de8d"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.838004 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s" event={"ID":"7026c744-378e-4b55-b738-e295be19ef2a","Type":"ContainerStarted","Data":"354f2e65d009aa088ebc5c1303515566b41f1bf9855de784390bcf2db8d47ef3"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.839894 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" event={"ID":"4bb9049c-5f9a-4260-bd40-3140669d6701","Type":"ContainerStarted","Data":"c4e3d585c50f28b38138c2570cd47ef221eec474b692535ec30b5ddb4f5df6f0"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.839936 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" event={"ID":"4bb9049c-5f9a-4260-bd40-3140669d6701","Type":"ContainerStarted","Data":"4e04cfb3085eb5138f41ae3cf6cb99faaa26a99cf07a7aac965fa10677cb0d85"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.839950 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" event={"ID":"4bb9049c-5f9a-4260-bd40-3140669d6701","Type":"ContainerStarted","Data":"47e11d5dd778fa1ae9f0bb56927b9aa33b4a6ccf86dd0e7d747d7cdba11c0438"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.840949 4818 generic.go:334] "Generic (PLEG): container finished" podID="dde52314-bdf5-4c51-90e2-a258a21ec712" containerID="e80dbfec4e18e1fd0d8fa71dfda1a0dd79c1f6e0ca2aa0ebf3cdb099928f0e99" exitCode=0
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.841006 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" event={"ID":"dde52314-bdf5-4c51-90e2-a258a21ec712","Type":"ContainerDied","Data":"e80dbfec4e18e1fd0d8fa71dfda1a0dd79c1f6e0ca2aa0ebf3cdb099928f0e99"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.841026 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" event={"ID":"dde52314-bdf5-4c51-90e2-a258a21ec712","Type":"ContainerStarted","Data":"6ce19310aa268acebbe16d209b97f4f1a9519bbbce00843a921c0ba456c6e08b"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.842248 4818 generic.go:334] "Generic (PLEG): container finished" podID="28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33" containerID="3d9a933d836ddf3c13c2321148192c4e243d08ae8e277669996641b66d75e115" exitCode=0
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.842284 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" event={"ID":"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33","Type":"ContainerDied","Data":"3d9a933d836ddf3c13c2321148192c4e243d08ae8e277669996641b66d75e115"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.842338 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" event={"ID":"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33","Type":"ContainerStarted","Data":"99450c164f22a856003e8197f2f25afdf39d6f3566891ebb22847f4a6f59c840"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.845542 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" event={"ID":"a07c676e-64cc-4962-8d9a-251ab4c93d60","Type":"ContainerStarted","Data":"195a4a5fcd14b75449a4d95137bb5ffa0d855ccbfcae9a418a0ad5b435725f8b"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.845577 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" event={"ID":"a07c676e-64cc-4962-8d9a-251ab4c93d60","Type":"ContainerStarted","Data":"cbcf953b7bfa207a48dd9caceaba0f3041ca69e4e3776487eae4d8e8f7c6557e"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.848440 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" event={"ID":"a4a87b84-cb7a-4406-ac3a-473e984376a1","Type":"ContainerStarted","Data":"bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.848467 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" event={"ID":"a4a87b84-cb7a-4406-ac3a-473e984376a1","Type":"ContainerStarted","Data":"71161e90649e21a2c1e2943b936ddfcecd738d45fb34de0249dcd23a1e01e253"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.848640 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.850892 4818 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-xkkwg container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.850948 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" podUID="a4a87b84-cb7a-4406-ac3a-473e984376a1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.851795 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" event={"ID":"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126","Type":"ContainerStarted","Data":"4611d8470d7e6f183eae9942eb7f2fee165f0f6dad5b136f93505e610875c06a"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.851828 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" event={"ID":"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126","Type":"ContainerStarted","Data":"a27ed3c8b41c56011af99bb386075a6052db64584d7f1b698b78a5d6ec355ec5"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.855190 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.855165 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst" event={"ID":"3e75be35-3cb2-42b8-b16f-e434ea90c8ef","Type":"ContainerStarted","Data":"80a6bdf50df9b63a6e735ccd69ef2c8194fd03fc64d0182511ac29ef02ae4555"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.855249 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst" event={"ID":"3e75be35-3cb2-42b8-b16f-e434ea90c8ef","Type":"ContainerStarted","Data":"f34c99a004fba61f51c5d2135ff4cdbf531ebcec3275cbdafe8da2c27167d036"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.856732 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7r27t" event={"ID":"8b00b5de-92e6-45ef-bd66-2f06b0b0e249","Type":"ContainerStarted","Data":"efa37aa43c6707250cdc578c20778a7224fd16b161c8b7443d31093dcad3e9cc"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.856759 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7r27t" event={"ID":"8b00b5de-92e6-45ef-bd66-2f06b0b0e249","Type":"ContainerStarted","Data":"c8342953a522d17eac7a48941d45c28adb6af2ba1ef19f9fa8d57c9503a80d20"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.857115 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7r27t"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.859969 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-m7rls" event={"ID":"3a715e0d-75ba-45d9-a213-33a6a10f4b9f","Type":"ContainerStarted","Data":"37c57a083fe00d3a3a029fc6b17fb84c5d46a0a7751379760e4e999e377a372f"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.860030 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-m7rls" event={"ID":"3a715e0d-75ba-45d9-a213-33a6a10f4b9f","Type":"ContainerStarted","Data":"c0a02833c4d71fbc1c2365776c47b4f123e8998ada0560ff3eaa8b894768607d"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.860361 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-m7rls"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.860474 4818 patch_prober.go:28] interesting pod/downloads-7954f5f757-7r27t container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.860509 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7r27t" podUID="8b00b5de-92e6-45ef-bd66-2f06b0b0e249" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.861705 4818 patch_prober.go:28] interesting pod/console-operator-58897d9998-m7rls container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.861752 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-m7rls" podUID="3a715e0d-75ba-45d9-a213-33a6a10f4b9f" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.862160 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" event={"ID":"95508e9a-38fe-4c90-83ee-1b87733b07a6","Type":"ContainerStarted","Data":"f8a8ceb1182db0f1f871edf28a9225c5ad59cfc5431813c9da6b16de683086a5"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.863399 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" event={"ID":"f7a2ab63-3622-49c3-abef-fc6ff98758e4","Type":"ContainerStarted","Data":"6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.863429 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" event={"ID":"f7a2ab63-3622-49c3-abef-fc6ff98758e4","Type":"ContainerStarted","Data":"32ac8af477cdc5a16e2ce78429e59f84497082b1375368c4e6a678d6055875d9"}
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.863841 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.865093 4818 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-xkh6t container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.865134 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" podUID="f7a2ab63-3622-49c3-abef-fc6ff98758e4" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.875010 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.895029 4818 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.915146 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.936052 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.956098 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.975333 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Sep 30 17:01:45 crc kubenswrapper[4818]: I0930 17:01:45.994798 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.012994 4818 request.go:700] Waited for 1.90520121s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-tls&limit=500&resourceVersion=0
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.015196 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.036217 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.055892 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.097140 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqx8x\" (UniqueName: \"kubernetes.io/projected/d8970c17-d95f-454b-ac56-db24223ef2fc-kube-api-access-jqx8x\") pod \"router-default-5444994796-gnmfp\" (UID: \"d8970c17-d95f-454b-ac56-db24223ef2fc\") " pod="openshift-ingress/router-default-5444994796-gnmfp"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.114027 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbrch\" (UniqueName: \"kubernetes.io/projected/0e461d8c-b64b-4a75-8bab-2056dfd16821-kube-api-access-wbrch\") pod \"etcd-operator-b45778765-w66fc\" (UID: \"0e461d8c-b64b-4a75-8bab-2056dfd16821\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.132152 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sjlv\" (UniqueName: \"kubernetes.io/projected/133ce1d2-cc16-41f9-b136-d197c20847d1-kube-api-access-9sjlv\") pod \"dns-operator-744455d44c-q8cn5\" (UID: \"133ce1d2-cc16-41f9-b136-d197c20847d1\") " pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.151073 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b78fa54a-7566-4b3c-ad3a-59fce0462af7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-j5fc5\" (UID: \"b78fa54a-7566-4b3c-ad3a-59fce0462af7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.171037 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlhtl\" (UniqueName: \"kubernetes.io/projected/90c5ca01-2b80-48f5-8a53-d7bdebb0af87-kube-api-access-mlhtl\") pod \"machine-config-controller-84d6567774-ll9ll\" (UID: \"90c5ca01-2b80-48f5-8a53-d7bdebb0af87\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.185351 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.195012 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ww58z\" (UniqueName: \"kubernetes.io/projected/fe65e822-d6e5-4427-ae15-a91ce81f90a5-kube-api-access-ww58z\") pod \"kube-storage-version-migrator-operator-b67b599dd-j5wfn\" (UID: \"fe65e822-d6e5-4427-ae15-a91ce81f90a5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.219246 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9hqg\" (UniqueName: \"kubernetes.io/projected/f6918a12-03b6-4867-8598-8739f036c746-kube-api-access-s9hqg\") pod \"authentication-operator-69f744f599-888xq\" (UID: \"f6918a12-03b6-4867-8598-8739f036c746\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.221108 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.229347 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.233251 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbllq\" (UniqueName: \"kubernetes.io/projected/7647753f-a8c4-498f-b876-7553155a6159-kube-api-access-sbllq\") pod \"machine-config-operator-74547568cd-g6ck2\" (UID: \"7647753f-a8c4-498f-b876-7553155a6159\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.236603 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-gnmfp"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.243524 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.249888 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.251811 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgr9w\" (UniqueName: \"kubernetes.io/projected/bc973df4-6c8f-448c-a1f8-609fd7526f3e-kube-api-access-bgr9w\") pod \"cluster-samples-operator-665b6dd947-5n9lk\" (UID: \"bc973df4-6c8f-448c-a1f8-609fd7526f3e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.260758 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.270565 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p28xv\" (UniqueName: \"kubernetes.io/projected/94a6a5c0-845a-4f60-b111-eb28393fb07c-kube-api-access-p28xv\") pod \"oauth-openshift-558db77b4-fbfxd\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") " pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302616 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-tls\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302657 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-certificates\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302680 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302708 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302744 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/445ee21c-87e4-4bd8-9550-0ed6c16933f7-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302769 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-trusted-ca\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302791 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/445ee21c-87e4-4bd8-9550-0ed6c16933f7-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302812 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/445ee21c-87e4-4bd8-9550-0ed6c16933f7-config\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302844 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.302879 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-bound-sa-token\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.303033 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdxw6\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-kube-api-access-zdxw6\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.303444 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:46.803422106 +0000 UTC m=+153.557693982 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404007 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404226 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-certificates\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404261 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404292 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404327 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/445ee21c-87e4-4bd8-9550-0ed6c16933f7-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404352 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-trusted-ca\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404372 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/445ee21c-87e4-4bd8-9550-0ed6c16933f7-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404398 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/445ee21c-87e4-4bd8-9550-0ed6c16933f7-config\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404437 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-bound-sa-token\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404469 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdxw6\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-kube-api-access-zdxw6\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.404514 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-tls\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.408943 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-trusted-ca\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.409438 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.409472 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/445ee21c-87e4-4bd8-9550-0ed6c16933f7-config\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.410916 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:46.910870164 +0000 UTC m=+153.665142080 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.411442 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-certificates\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.412463 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/445ee21c-87e4-4bd8-9550-0ed6c16933f7-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.417456 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-tls\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.418266 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.457588 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/445ee21c-87e4-4bd8-9550-0ed6c16933f7-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-fppn6\" (UID: \"445ee21c-87e4-4bd8-9550-0ed6c16933f7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.476140 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-bound-sa-token\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.477471 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.492656 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.493280 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdxw6\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-kube-api-access-zdxw6\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.499184 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.506394 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-profile-collector-cert\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.509650 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-signing-key\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.509734 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/81b51d61-ed65-4721-8800-9920be8a34e0-webhook-cert\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.509751 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8484f8fc-ef23-4e59-9461-bdacf24ded18-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.509799 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4af5c2a5-d804-4267-ac93-799d3699c538-config\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.509819 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw7tb\" (UniqueName: \"kubernetes.io/projected/6430a370-0a1f-4a1f-b156-037ae868cf87-kube-api-access-hw7tb\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.510853 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4af5c2a5-d804-4267-ac93-799d3699c538-serving-cert\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.510952 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwm69\" (UniqueName: \"kubernetes.io/projected/517b0538-1d7a-495f-af7e-9dcfb71d9cd3-kube-api-access-nwm69\") pod \"multus-admission-controller-857f4d67dd-mfrws\" (UID: \"517b0538-1d7a-495f-af7e-9dcfb71d9cd3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.510999 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28fjj\" (UniqueName: \"kubernetes.io/projected/81b51d61-ed65-4721-8800-9920be8a34e0-kube-api-access-28fjj\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511046 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/81b51d61-ed65-4721-8800-9920be8a34e0-apiservice-cert\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511123 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78k2w\" (UniqueName: \"kubernetes.io/projected/c2569fcb-5e0d-486a-8a66-17a5539c2ae0-kube-api-access-78k2w\") pod \"ingress-canary-c494l\" (UID: \"c2569fcb-5e0d-486a-8a66-17a5539c2ae0\") " pod="openshift-ingress-canary/ingress-canary-c494l"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511138 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-socket-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511153 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6430a370-0a1f-4a1f-b156-037ae868cf87-metrics-tls\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511208 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6430a370-0a1f-4a1f-b156-037ae868cf87-trusted-ca\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511264 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/517b0538-1d7a-495f-af7e-9dcfb71d9cd3-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-mfrws\" (UID: \"517b0538-1d7a-495f-af7e-9dcfb71d9cd3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511291 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgmch\" (UniqueName: \"kubernetes.io/projected/6652f16f-304d-4c4a-84dd-97b68a4aa04b-kube-api-access-hgmch\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511322 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2zps\" (UniqueName: \"kubernetes.io/projected/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-kube-api-access-v2zps\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511359 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-plugins-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511473 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlqfs\" (UniqueName: \"kubernetes.io/projected/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-kube-api-access-hlqfs\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511493 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qg79\" (UniqueName: \"kubernetes.io/projected/4ac2d320-4757-43f3-8dcd-77c514918234-kube-api-access-6qg79\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511553 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44t9j\" (UniqueName: \"kubernetes.io/projected/4af5c2a5-d804-4267-ac93-799d3699c538-kube-api-access-44t9j\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511569 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-certs\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511593 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-srv-cert\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511608 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/317be33e-ccbd-44f6-aafa-f77a1f2ba6eb-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-xq6hp\" (UID: \"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511649 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8484f8fc-ef23-4e59-9461-bdacf24ded18-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511684 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-signing-cabundle\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511702 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-csi-data-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511718 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4ac2d320-4757-43f3-8dcd-77c514918234-profile-collector-cert\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511768 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpbt5\" (UniqueName: \"kubernetes.io/projected/41ac1391-f116-4d7b-88dd-d694671283dc-kube-api-access-kpbt5\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511796 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41ac1391-f116-4d7b-88dd-d694671283dc-config-volume\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511850 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4ac2d320-4757-43f3-8dcd-77c514918234-srv-cert\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511914 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pflq\" (UniqueName: \"kubernetes.io/projected/a063af3b-6bf1-45bc-a02d-92679099140c-kube-api-access-7pflq\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511951 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8xlf\" (UniqueName: \"kubernetes.io/projected/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-kube-api-access-g8xlf\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.511965 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvlg6\" (UniqueName: \"kubernetes.io/projected/d4db418f-d110-4268-bf5f-6a1c9e565f84-kube-api-access-nvlg6\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.512020 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cjbw\" (UniqueName: \"kubernetes.io/projected/317be33e-ccbd-44f6-aafa-f77a1f2ba6eb-kube-api-access-8cjbw\") pod \"control-plane-machine-set-operator-78cbb6b69f-xq6hp\" (UID: \"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.512051 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8484f8fc-ef23-4e59-9461-bdacf24ded18-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.512067 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/589570b2-6222-4f78-80bb-05f8f926957e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9g8j4\" (UID: \"589570b2-6222-4f78-80bb-05f8f926957e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.512094 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a063af3b-6bf1-45bc-a02d-92679099140c-metrics-tls\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48"
Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.512158 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf87b\" (UniqueName:
\"kubernetes.io/projected/cccf2295-4d3d-410a-b491-86f9d85d264b-kube-api-access-nf87b\") pod \"migrator-59844c95c7-96n5p\" (UID: \"cccf2295-4d3d-410a-b491-86f9d85d264b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.515414 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.515469 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.515477 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.515492 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-registration-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.517854 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41ac1391-f116-4d7b-88dd-d694671283dc-secret-volume\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.517981 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6430a370-0a1f-4a1f-b156-037ae868cf87-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.518035 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a063af3b-6bf1-45bc-a02d-92679099140c-config-volume\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.518307 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-node-bootstrap-token\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 
17:01:46.519398 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.519432 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/81b51d61-ed65-4721-8800-9920be8a34e0-tmpfs\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.519464 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sgxd\" (UniqueName: \"kubernetes.io/projected/589570b2-6222-4f78-80bb-05f8f926957e-kube-api-access-8sgxd\") pod \"package-server-manager-789f6589d5-9g8j4\" (UID: \"589570b2-6222-4f78-80bb-05f8f926957e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.519565 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2569fcb-5e0d-486a-8a66-17a5539c2ae0-cert\") pod \"ingress-canary-c494l\" (UID: \"c2569fcb-5e0d-486a-8a66-17a5539c2ae0\") " pod="openshift-ingress-canary/ingress-canary-c494l" Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.520035 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.02001616 +0000 UTC m=+153.774287966 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.521831 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-mountpoint-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.584531 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5"] Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.598346 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll"] Sep 30 17:01:46 crc kubenswrapper[4818]: W0930 17:01:46.611522 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90c5ca01_2b80_48f5_8a53_d7bdebb0af87.slice/crio-6b20dc11e6954723934bba650563b639ab018e0d06c7f6f10d6275a9a63445b1 WatchSource:0}: Error finding container 6b20dc11e6954723934bba650563b639ab018e0d06c7f6f10d6275a9a63445b1: Status 404 returned error can't find the container with id 6b20dc11e6954723934bba650563b639ab018e0d06c7f6f10d6275a9a63445b1 Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.613159 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn"] Sep 30 17:01:46 crc kubenswrapper[4818]: W0930 17:01:46.623310 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb78fa54a_7566_4b3c_ad3a_59fce0462af7.slice/crio-766549ee600172e09a1e8384370095a643fa4202ff37719e42e68200a3c20a3c WatchSource:0}: Error finding container 766549ee600172e09a1e8384370095a643fa4202ff37719e42e68200a3c20a3c: Status 404 returned error can't find the container with id 766549ee600172e09a1e8384370095a643fa4202ff37719e42e68200a3c20a3c Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.624093 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.624411 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.12438888 +0000 UTC m=+153.878660706 (durationBeforeRetry 500ms). 
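
The MountVolume.MountDevice failure above, and every retry of it below, reduces to one condition: no CSI driver named kubevirt.io.hostpath-provisioner is registered with this kubelet yet. The pod that provides it (hostpath-provisioner/csi-hostpathplugin-kvwlg) is itself still getting its volumes mounted in these same entries, so each attempt to build a CSI client for the image-registry PVC fails fast and is re-queued. A minimal Go sketch of that lookup-before-dial guard, with illustrative names and paths (this is not kubelet source):

package main

import (
	"fmt"
	"sync"
)

// registry stands in for the kubelet's in-memory table of registered CSI
// plugins; a driver appears here only after its node-driver-registrar has
// announced itself over the plugin-registration socket.
type registry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> unix socket endpoint
}

func (r *registry) endpoint(driver string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[driver]
	if !ok {
		// Same shape as the log line: fail fast and let the operation
		// executor schedule a retry with backoff.
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driver)
	}
	return ep, nil
}

func main() {
	r := &registry{drivers: map[string]string{}}
	if _, err := r.endpoint("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("MountDevice fails:", err) // before the plugin pod is up
	}
	// Once the plugin pod registers itself, the same lookup succeeds.
	r.mu.Lock()
	r.drivers["kubevirt.io.hostpath-provisioner"] = "/var/lib/kubelet/plugins/csi-hostpath/csi.sock"
	r.mu.Unlock()
	ep, _ := r.endpoint("kubevirt.io.hostpath-provisioner")
	fmt.Println("after registration, dial:", ep) // retry now succeeds
}
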
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625209 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8484f8fc-ef23-4e59-9461-bdacf24ded18-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625241 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/589570b2-6222-4f78-80bb-05f8f926957e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9g8j4\" (UID: \"589570b2-6222-4f78-80bb-05f8f926957e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625262 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a063af3b-6bf1-45bc-a02d-92679099140c-metrics-tls\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625281 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf87b\" (UniqueName: \"kubernetes.io/projected/cccf2295-4d3d-410a-b491-86f9d85d264b-kube-api-access-nf87b\") pod \"migrator-59844c95c7-96n5p\" (UID: \"cccf2295-4d3d-410a-b491-86f9d85d264b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625300 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625315 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625330 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-registration-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625350 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41ac1391-f116-4d7b-88dd-d694671283dc-secret-volume\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625380 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a063af3b-6bf1-45bc-a02d-92679099140c-config-volume\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625396 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6430a370-0a1f-4a1f-b156-037ae868cf87-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625412 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-node-bootstrap-token\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625432 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625447 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/81b51d61-ed65-4721-8800-9920be8a34e0-tmpfs\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625460 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sgxd\" (UniqueName: \"kubernetes.io/projected/589570b2-6222-4f78-80bb-05f8f926957e-kube-api-access-8sgxd\") pod \"package-server-manager-789f6589d5-9g8j4\" (UID: \"589570b2-6222-4f78-80bb-05f8f926957e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625483 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2569fcb-5e0d-486a-8a66-17a5539c2ae0-cert\") pod \"ingress-canary-c494l\" (UID: \"c2569fcb-5e0d-486a-8a66-17a5539c2ae0\") " pod="openshift-ingress-canary/ingress-canary-c494l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625498 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-mountpoint-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " 
pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625514 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-profile-collector-cert\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625550 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-signing-key\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.625565 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/81b51d61-ed65-4721-8800-9920be8a34e0-webhook-cert\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626468 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8484f8fc-ef23-4e59-9461-bdacf24ded18-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626501 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw7tb\" (UniqueName: \"kubernetes.io/projected/6430a370-0a1f-4a1f-b156-037ae868cf87-kube-api-access-hw7tb\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626519 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4af5c2a5-d804-4267-ac93-799d3699c538-config\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626535 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4af5c2a5-d804-4267-ac93-799d3699c538-serving-cert\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626561 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwm69\" (UniqueName: \"kubernetes.io/projected/517b0538-1d7a-495f-af7e-9dcfb71d9cd3-kube-api-access-nwm69\") pod \"multus-admission-controller-857f4d67dd-mfrws\" (UID: \"517b0538-1d7a-495f-af7e-9dcfb71d9cd3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626578 4818 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-28fjj\" (UniqueName: \"kubernetes.io/projected/81b51d61-ed65-4721-8800-9920be8a34e0-kube-api-access-28fjj\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626593 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/81b51d61-ed65-4721-8800-9920be8a34e0-apiservice-cert\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626621 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6430a370-0a1f-4a1f-b156-037ae868cf87-metrics-tls\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626659 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78k2w\" (UniqueName: \"kubernetes.io/projected/c2569fcb-5e0d-486a-8a66-17a5539c2ae0-kube-api-access-78k2w\") pod \"ingress-canary-c494l\" (UID: \"c2569fcb-5e0d-486a-8a66-17a5539c2ae0\") " pod="openshift-ingress-canary/ingress-canary-c494l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626675 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-socket-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626697 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6430a370-0a1f-4a1f-b156-037ae868cf87-trusted-ca\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626714 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/517b0538-1d7a-495f-af7e-9dcfb71d9cd3-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-mfrws\" (UID: \"517b0538-1d7a-495f-af7e-9dcfb71d9cd3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626730 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgmch\" (UniqueName: \"kubernetes.io/projected/6652f16f-304d-4c4a-84dd-97b68a4aa04b-kube-api-access-hgmch\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626751 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2zps\" (UniqueName: \"kubernetes.io/projected/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-kube-api-access-v2zps\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " 
pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626767 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-plugins-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626785 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlqfs\" (UniqueName: \"kubernetes.io/projected/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-kube-api-access-hlqfs\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626802 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qg79\" (UniqueName: \"kubernetes.io/projected/4ac2d320-4757-43f3-8dcd-77c514918234-kube-api-access-6qg79\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626820 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44t9j\" (UniqueName: \"kubernetes.io/projected/4af5c2a5-d804-4267-ac93-799d3699c538-kube-api-access-44t9j\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626835 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-certs\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626851 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-srv-cert\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626869 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/317be33e-ccbd-44f6-aafa-f77a1f2ba6eb-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-xq6hp\" (UID: \"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626888 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8484f8fc-ef23-4e59-9461-bdacf24ded18-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626905 4818 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-csi-data-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626934 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4ac2d320-4757-43f3-8dcd-77c514918234-profile-collector-cert\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626958 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-signing-cabundle\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626977 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpbt5\" (UniqueName: \"kubernetes.io/projected/41ac1391-f116-4d7b-88dd-d694671283dc-kube-api-access-kpbt5\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.626995 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41ac1391-f116-4d7b-88dd-d694671283dc-config-volume\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.627025 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4ac2d320-4757-43f3-8dcd-77c514918234-srv-cert\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.627052 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pflq\" (UniqueName: \"kubernetes.io/projected/a063af3b-6bf1-45bc-a02d-92679099140c-kube-api-access-7pflq\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.627072 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8xlf\" (UniqueName: \"kubernetes.io/projected/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-kube-api-access-g8xlf\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.627088 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvlg6\" (UniqueName: \"kubernetes.io/projected/d4db418f-d110-4268-bf5f-6a1c9e565f84-kube-api-access-nvlg6\") pod \"csi-hostpathplugin-kvwlg\" (UID: 
\"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.627106 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cjbw\" (UniqueName: \"kubernetes.io/projected/317be33e-ccbd-44f6-aafa-f77a1f2ba6eb-kube-api-access-8cjbw\") pod \"control-plane-machine-set-operator-78cbb6b69f-xq6hp\" (UID: \"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.633497 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.634149 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-socket-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.635001 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6430a370-0a1f-4a1f-b156-037ae868cf87-trusted-ca\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.636563 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a063af3b-6bf1-45bc-a02d-92679099140c-metrics-tls\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.639667 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-plugins-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.640501 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41ac1391-f116-4d7b-88dd-d694671283dc-config-volume\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.640574 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-csi-data-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.642174 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-09-30 17:01:47.142154394 +0000 UTC m=+153.896426280 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.642637 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/81b51d61-ed65-4721-8800-9920be8a34e0-tmpfs\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.643296 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-profile-collector-cert\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.643765 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4af5c2a5-d804-4267-ac93-799d3699c538-config\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.643821 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8484f8fc-ef23-4e59-9461-bdacf24ded18-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.645156 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-registration-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.645414 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d4db418f-d110-4268-bf5f-6a1c9e565f84-mountpoint-dir\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.650761 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/317be33e-ccbd-44f6-aafa-f77a1f2ba6eb-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-xq6hp\" (UID: \"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.652024 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/517b0538-1d7a-495f-af7e-9dcfb71d9cd3-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-mfrws\" (UID: \"517b0538-1d7a-495f-af7e-9dcfb71d9cd3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.652073 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-srv-cert\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.652410 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-certs\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.652751 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a063af3b-6bf1-45bc-a02d-92679099140c-config-volume\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.657215 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6430a370-0a1f-4a1f-b156-037ae868cf87-metrics-tls\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.657433 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/589570b2-6222-4f78-80bb-05f8f926957e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9g8j4\" (UID: \"589570b2-6222-4f78-80bb-05f8f926957e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.658354 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/81b51d61-ed65-4721-8800-9920be8a34e0-webhook-cert\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.658698 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-signing-cabundle\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.665171 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/81b51d61-ed65-4721-8800-9920be8a34e0-apiservice-cert\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.665604 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.665997 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4af5c2a5-d804-4267-ac93-799d3699c538-serving-cert\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.668283 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8484f8fc-ef23-4e59-9461-bdacf24ded18-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.668293 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2"] Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.669528 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41ac1391-f116-4d7b-88dd-d694671283dc-secret-volume\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.670515 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-node-bootstrap-token\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.672104 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4ac2d320-4757-43f3-8dcd-77c514918234-srv-cert\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.676575 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2569fcb-5e0d-486a-8a66-17a5539c2ae0-cert\") pod \"ingress-canary-c494l\" (UID: \"c2569fcb-5e0d-486a-8a66-17a5539c2ae0\") " pod="openshift-ingress-canary/ingress-canary-c494l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.681440 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-signing-key\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:46 crc kubenswrapper[4818]: 
I0930 17:01:46.685074 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4ac2d320-4757-43f3-8dcd-77c514918234-profile-collector-cert\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.701125 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cjbw\" (UniqueName: \"kubernetes.io/projected/317be33e-ccbd-44f6-aafa-f77a1f2ba6eb-kube-api-access-8cjbw\") pod \"control-plane-machine-set-operator-78cbb6b69f-xq6hp\" (UID: \"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.714649 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-w66fc"] Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.716014 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf87b\" (UniqueName: \"kubernetes.io/projected/cccf2295-4d3d-410a-b491-86f9d85d264b-kube-api-access-nf87b\") pod \"migrator-59844c95c7-96n5p\" (UID: \"cccf2295-4d3d-410a-b491-86f9d85d264b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.729423 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8484f8fc-ef23-4e59-9461-bdacf24ded18-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6fgm2\" (UID: \"8484f8fc-ef23-4e59-9461-bdacf24ded18\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.733263 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.733478 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.733848 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.233831265 +0000 UTC m=+153.988103081 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.733908 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.734210 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.234190855 +0000 UTC m=+153.988462671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.752668 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qg79\" (UniqueName: \"kubernetes.io/projected/4ac2d320-4757-43f3-8dcd-77c514918234-kube-api-access-6qg79\") pod \"olm-operator-6b444d44fb-66b7q\" (UID: \"4ac2d320-4757-43f3-8dcd-77c514918234\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.752799 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78k2w\" (UniqueName: \"kubernetes.io/projected/c2569fcb-5e0d-486a-8a66-17a5539c2ae0-kube-api-access-78k2w\") pod \"ingress-canary-c494l\" (UID: \"c2569fcb-5e0d-486a-8a66-17a5539c2ae0\") " pod="openshift-ingress-canary/ingress-canary-c494l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.771230 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlqfs\" (UniqueName: \"kubernetes.io/projected/848650c8-e32a-4c8a-8aae-9fbfa62ba6b9-kube-api-access-hlqfs\") pod \"service-ca-9c57cc56f-tg7pk\" (UID: \"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9\") " pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.792584 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2zps\" (UniqueName: \"kubernetes.io/projected/fe451263-a8c3-4bf2-a4ea-b6c21fac505a-kube-api-access-v2zps\") pod \"machine-config-server-5lwgz\" (UID: \"fe451263-a8c3-4bf2-a4ea-b6c21fac505a\") " pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.794525 4818 util.go:30] "No sandbox for pod can be found. 
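
Teardown hits the same wall as setup: the outgoing image-registry pod (UID 8f668bae-612b-4b75-9490-919e737c6a3b) cannot have the PVC unmounted until the same driver registers, so the mount for the new pod and the unmount for the old one retry in lockstep every 500ms. Registration happens when the plugin drops a socket into the kubelet's plugin-registration directory; below is a polling approximation of that discovery, where the path is the conventional location and the kubelet itself uses a filesystem watcher rather than a poll (both stated as assumptions):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

func main() {
	// Conventional kubelet plugin-registration directory (assumption;
	// the kubelet watches it with inotify rather than polling).
	const dir = "/var/lib/kubelet/plugins_registry"
	for i := 0; i < 240; i++ { // give up after ~2 minutes
		entries, err := os.ReadDir(dir)
		if err == nil && len(entries) > 0 {
			for _, e := range entries {
				fmt.Println("registered plugin socket:", filepath.Join(dir, e.Name()))
			}
			return
		}
		time.Sleep(500 * time.Millisecond) // same cadence as the retry gate
	}
	fmt.Println("no CSI plugins registered yet")
}
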
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-c494l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.804002 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-5lwgz" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.826566 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgmch\" (UniqueName: \"kubernetes.io/projected/6652f16f-304d-4c4a-84dd-97b68a4aa04b-kube-api-access-hgmch\") pod \"marketplace-operator-79b997595-8gcpn\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.834483 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pflq\" (UniqueName: \"kubernetes.io/projected/a063af3b-6bf1-45bc-a02d-92679099140c-kube-api-access-7pflq\") pod \"dns-default-scg48\" (UID: \"a063af3b-6bf1-45bc-a02d-92679099140c\") " pod="openshift-dns/dns-default-scg48" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.835049 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.835561 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.33554592 +0000 UTC m=+154.089817736 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.850317 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8xlf\" (UniqueName: \"kubernetes.io/projected/708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3-kube-api-access-g8xlf\") pod \"catalog-operator-68c6474976-9qx5l\" (UID: \"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.874498 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw7tb\" (UniqueName: \"kubernetes.io/projected/6430a370-0a1f-4a1f-b156-037ae868cf87-kube-api-access-hw7tb\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.899648 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sgxd\" (UniqueName: \"kubernetes.io/projected/589570b2-6222-4f78-80bb-05f8f926957e-kube-api-access-8sgxd\") pod \"package-server-manager-789f6589d5-9g8j4\" (UID: \"589570b2-6222-4f78-80bb-05f8f926957e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.911287 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" event={"ID":"0e461d8c-b64b-4a75-8bab-2056dfd16821","Type":"ContainerStarted","Data":"ad83034f2efc928811c1b45b1ec764787678bfad938763f275013d17f9080336"} Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.913099 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" event={"ID":"fe65e822-d6e5-4427-ae15-a91ce81f90a5","Type":"ContainerStarted","Data":"9e098365b91de8c4750eb71b48c47a7aa13ef637472206b297130d01022e6576"} Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.914074 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" event={"ID":"7647753f-a8c4-498f-b876-7553155a6159","Type":"ContainerStarted","Data":"7ce782c5e7324fa1450580266e76a81718e13c46d677947d1c919dc98f415bef"} Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.918936 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvlg6\" (UniqueName: \"kubernetes.io/projected/d4db418f-d110-4268-bf5f-6a1c9e565f84-kube-api-access-nvlg6\") pod \"csi-hostpathplugin-kvwlg\" (UID: \"d4db418f-d110-4268-bf5f-6a1c9e565f84\") " pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.933498 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.934220 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.936126 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:46 crc kubenswrapper[4818]: E0930 17:01:46.936527 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.436513164 +0000 UTC m=+154.190784990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.938020 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28fjj\" (UniqueName: \"kubernetes.io/projected/81b51d61-ed65-4721-8800-9920be8a34e0-kube-api-access-28fjj\") pod \"packageserver-d55dfcdfc-4m5bh\" (UID: \"81b51d61-ed65-4721-8800-9920be8a34e0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.940738 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.968120 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-gnmfp" event={"ID":"d8970c17-d95f-454b-ac56-db24223ef2fc","Type":"ContainerStarted","Data":"a9126eccce5fd1d0307938749a35fc92803fadc402b3da982e38c4dc1a9d754d"} Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.968174 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-gnmfp" event={"ID":"d8970c17-d95f-454b-ac56-db24223ef2fc","Type":"ContainerStarted","Data":"cfe814002fef8eab70f1d377bd5dad209bf795cce04be4d237b4ceb793b03273"} Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.968687 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.971159 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwm69\" (UniqueName: \"kubernetes.io/projected/517b0538-1d7a-495f-af7e-9dcfb71d9cd3-kube-api-access-nwm69\") pod \"multus-admission-controller-857f4d67dd-mfrws\" (UID: \"517b0538-1d7a-495f-af7e-9dcfb71d9cd3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.971936 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-q8cn5"] Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.985268 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6430a370-0a1f-4a1f-b156-037ae868cf87-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nvtdf\" (UID: \"6430a370-0a1f-4a1f-b156-037ae868cf87\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.986423 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.991459 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" event={"ID":"90c5ca01-2b80-48f5-8a53-d7bdebb0af87","Type":"ContainerStarted","Data":"6b20dc11e6954723934bba650563b639ab018e0d06c7f6f10d6275a9a63445b1"} Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.992943 4818 generic.go:334] "Generic (PLEG): container finished" podID="95508e9a-38fe-4c90-83ee-1b87733b07a6" containerID="c85065474f92c194298ade2e3d442f2b70b63a6ac78f0319318c965b65c0c46d" exitCode=0 Sep 30 17:01:46 crc kubenswrapper[4818]: I0930 17:01:46.992986 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" event={"ID":"95508e9a-38fe-4c90-83ee-1b87733b07a6","Type":"ContainerDied","Data":"c85065474f92c194298ade2e3d442f2b70b63a6ac78f0319318c965b65c0c46d"} Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.006684 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" event={"ID":"e3243d1f-1bcb-48cf-83e2-7e6eeaf15126","Type":"ContainerStarted","Data":"13765e2b11262f9a542a2a7d9bee6e1dd432e8298aeaf3844da9ceacb192b215"} Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.008611 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpbt5\" (UniqueName: \"kubernetes.io/projected/41ac1391-f116-4d7b-88dd-d694671283dc-kube-api-access-kpbt5\") pod \"collect-profiles-29320860-4bffl\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.010167 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.023458 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-scg48" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.039519 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" event={"ID":"b78fa54a-7566-4b3c-ad3a-59fce0462af7","Type":"ContainerStarted","Data":"766549ee600172e09a1e8384370095a643fa4202ff37719e42e68200a3c20a3c"} Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.039854 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.039890 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44t9j\" (UniqueName: \"kubernetes.io/projected/4af5c2a5-d804-4267-ac93-799d3699c538-kube-api-access-44t9j\") pod \"service-ca-operator-777779d784-rkqvp\" (UID: \"4af5c2a5-d804-4267-ac93-799d3699c538\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.040037 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.54001903 +0000 UTC m=+154.294290836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.040070 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.040771 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.540757571 +0000 UTC m=+154.295029387 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.043450 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.050413 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.050580 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" event={"ID":"dde52314-bdf5-4c51-90e2-a258a21ec712","Type":"ContainerStarted","Data":"6d0d634e4b90b8f0a5705e23d4d50858a761578ab8b1def3d612b0ded99ab8b1"} Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.050916 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.063421 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" event={"ID":"28688dd1-2bb0-4ec9-a7a5-c9683aaa7f33","Type":"ContainerStarted","Data":"0af5051f7474d3d00a9207686518456e73aeeca0b3553fc94ebf4b5b3c272919"} Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.065770 4818 patch_prober.go:28] interesting pod/downloads-7954f5f757-7r27t container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.065823 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7r27t" podUID="8b00b5de-92e6-45ef-bd66-2f06b0b0e249" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.074792 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.079167 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.096493 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.102597 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-m7rls" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.144176 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.144514 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.644498473 +0000 UTC m=+154.398770289 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.146020 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.152964 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.652949663 +0000 UTC m=+154.407221479 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.187963 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-888xq"] Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.188316 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.217197 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.238117 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.239530 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.239580 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.252106 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.252290 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.252825 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.752806966 +0000 UTC m=+154.507078782 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.266854 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.354813 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.355553 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.85553548 +0000 UTC m=+154.609807296 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.375171 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6"] Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.375231 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-c494l"] Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.387165 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk"] Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.421378 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fbfxd"] Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.457054 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.457203 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.957185253 +0000 UTC m=+154.711457059 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.457613 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.457807 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:47.95779977 +0000 UTC m=+154.712071586 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.498309 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2"] Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.558312 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.558816 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.058796485 +0000 UTC m=+154.813068301 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.558876 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.559189 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.059181656 +0000 UTC m=+154.813453472 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.661778 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.662096 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.162073595 +0000 UTC m=+154.916345411 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.763859 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.764272 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.264258043 +0000 UTC m=+155.018529859 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.814544 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q"] Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.866734 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.867045 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.367032168 +0000 UTC m=+155.121303984 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.948793 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-x4d8s" podStartSLOduration=128.948776967 podStartE2EDuration="2m8.948776967s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:47.946744289 +0000 UTC m=+154.701016105" watchObservedRunningTime="2025-09-30 17:01:47.948776967 +0000 UTC m=+154.703048783" Sep 30 17:01:47 crc kubenswrapper[4818]: I0930 17:01:47.968791 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:47 crc kubenswrapper[4818]: E0930 17:01:47.969338 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.46932723 +0000 UTC m=+155.223599036 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.022339 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-gnmfp" podStartSLOduration=129.022320783 podStartE2EDuration="2m9.022320783s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:47.982558895 +0000 UTC m=+154.736830711" watchObservedRunningTime="2025-09-30 17:01:48.022320783 +0000 UTC m=+154.776592599" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.072303 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-p8vq2" podStartSLOduration=129.07228435 podStartE2EDuration="2m9.07228435s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.061292559 +0000 UTC m=+154.815564385" watchObservedRunningTime="2025-09-30 17:01:48.07228435 +0000 UTC m=+154.826556166" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.073773 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.074117 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.574104282 +0000 UTC m=+155.328376098 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.091081 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7r27t" podStartSLOduration=129.091061613 podStartE2EDuration="2m9.091061613s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.0892039 +0000 UTC m=+154.843475706" watchObservedRunningTime="2025-09-30 17:01:48.091061613 +0000 UTC m=+154.845333429" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.107501 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" event={"ID":"7647753f-a8c4-498f-b876-7553155a6159","Type":"ContainerStarted","Data":"ccf22a219fa8b5ccd4279d2d79239e4d59afde55aef6423e9bdfe52992ce440a"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.175605 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.190384 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.6903666 +0000 UTC m=+155.444638416 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.220019 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cgfst" podStartSLOduration=130.22000052 podStartE2EDuration="2m10.22000052s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.18718573 +0000 UTC m=+154.941457546" watchObservedRunningTime="2025-09-30 17:01:48.22000052 +0000 UTC m=+154.974272336" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.229426 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-5lwgz" event={"ID":"fe451263-a8c3-4bf2-a4ea-b6c21fac505a","Type":"ContainerStarted","Data":"4f48b8b075839d41935ba66107e4c220db6738c99bbcfd0d705ca758be1ce880"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.261220 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" event={"ID":"90c5ca01-2b80-48f5-8a53-d7bdebb0af87","Type":"ContainerStarted","Data":"d177c944eb9391ce02add82eda86f4d2bac762d2c734685dbfb49fbe8c04046f"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.261281 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" event={"ID":"90c5ca01-2b80-48f5-8a53-d7bdebb0af87","Type":"ContainerStarted","Data":"8bd408630d1b20faa0b5b239198cb3acbc0003965bf3291ce1e80172df7c997e"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.283737 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:01:48 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:01:48 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:01:48 crc kubenswrapper[4818]: healthz check failed Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.283780 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.287212 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6" event={"ID":"445ee21c-87e4-4bd8-9550-0ed6c16933f7","Type":"ContainerStarted","Data":"c399476f029d554827b273952f52791d0901eee0094e35b065e924f391361438"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.304291 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" 
event={"ID":"b78fa54a-7566-4b3c-ad3a-59fce0462af7","Type":"ContainerStarted","Data":"9d9be75b2721525cde9888581e4d4e94d3b4c8681c7d5b7d30dc7157e9a43920"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.310505 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.312908 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.812892635 +0000 UTC m=+155.567164451 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.344017 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4chv9" podStartSLOduration=130.340275632 podStartE2EDuration="2m10.340275632s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.297844609 +0000 UTC m=+155.052116435" watchObservedRunningTime="2025-09-30 17:01:48.340275632 +0000 UTC m=+155.094547448" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.349643 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" event={"ID":"bc973df4-6c8f-448c-a1f8-609fd7526f3e","Type":"ContainerStarted","Data":"dfb7d86d623f164783b1a9984ad8c30cca0fe28ad84b3b40a655811f75082b83"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.352513 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" event={"ID":"4ac2d320-4757-43f3-8dcd-77c514918234","Type":"ContainerStarted","Data":"f0dadbfb6b3ebcc64a2fd4cba2a63db843f8a750cba75909400399d8c4b1e94b"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.356863 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" event={"ID":"fe65e822-d6e5-4427-ae15-a91ce81f90a5","Type":"ContainerStarted","Data":"7fa76937324125941188c49c88b0490f3a68ddddf2157a1e12432a268333a18d"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.367601 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-c494l" event={"ID":"c2569fcb-5e0d-486a-8a66-17a5539c2ae0","Type":"ContainerStarted","Data":"46e242e436b01685dcca2b1d75aee7bc98db1fa7b7d68b18e229e1720788e47d"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.374911 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p"] Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.383125 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" event={"ID":"133ce1d2-cc16-41f9-b136-d197c20847d1","Type":"ContainerStarted","Data":"b78330a3f9fa5e98f35f9ec13bf85a4434491fafa60b720dba6cab37dc25fa7f"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.393561 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" event={"ID":"0e461d8c-b64b-4a75-8bab-2056dfd16821","Type":"ContainerStarted","Data":"5f5c02cfd45248a01fc2597b9af9ea638407a21f3e2aec2c6e1d9c8fab5bfd1d"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.400234 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" event={"ID":"94a6a5c0-845a-4f60-b111-eb28393fb07c","Type":"ContainerStarted","Data":"bc34f2d6ad05daed687d4edc072c9f9f93588e920c81891cd7b03bdf21bba187"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.414055 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.415756 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:48.915744633 +0000 UTC m=+155.670016439 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.439651 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" event={"ID":"f6918a12-03b6-4867-8598-8739f036c746","Type":"ContainerStarted","Data":"69fd974841f60fe050b56685cd6446ff501fc11e0ff1a94fdd9188066fed322b"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.439693 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" event={"ID":"f6918a12-03b6-4867-8598-8739f036c746","Type":"ContainerStarted","Data":"debc80b5e5e70a2b4bfa60eefffaa876f8b3e18abeef918d0f80082abb2dc535"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.453191 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" event={"ID":"8484f8fc-ef23-4e59-9461-bdacf24ded18","Type":"ContainerStarted","Data":"84fbe8a580a978bdb2880254869bf6f68cc111199c7c8ca09fef109ec5d663ca"} Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.453240 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh"] Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.462510 4818 patch_prober.go:28] interesting pod/downloads-7954f5f757-7r27t container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.462552 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7r27t" podUID="8b00b5de-92e6-45ef-bd66-2f06b0b0e249" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.489206 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-wr9kd" podStartSLOduration=129.489186466 podStartE2EDuration="2m9.489186466s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.487156248 +0000 UTC m=+155.241428064" watchObservedRunningTime="2025-09-30 17:01:48.489186466 +0000 UTC m=+155.243458282" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.515603 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.516670 4818 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.016654375 +0000 UTC m=+155.770926191 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.617550 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.617912 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.117899326 +0000 UTC m=+155.872171142 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.711450 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-7rnqd" podStartSLOduration=129.711423989 podStartE2EDuration="2m9.711423989s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.658827097 +0000 UTC m=+155.413098913" watchObservedRunningTime="2025-09-30 17:01:48.711423989 +0000 UTC m=+155.465695805" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.720383 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.720628 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.22061251 +0000 UTC m=+155.974884316 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.723458 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8gcpn"] Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.772772 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt" podStartSLOduration=129.772754089 podStartE2EDuration="2m9.772754089s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.76927573 +0000 UTC m=+155.523547546" watchObservedRunningTime="2025-09-30 17:01:48.772754089 +0000 UTC m=+155.527025905" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.775521 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp"] Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.832977 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.833304 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.333292816 +0000 UTC m=+156.087564632 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.842357 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-m7rls" podStartSLOduration=129.842336512 podStartE2EDuration="2m9.842336512s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.816813168 +0000 UTC m=+155.571084974" watchObservedRunningTime="2025-09-30 17:01:48.842336512 +0000 UTC m=+155.596608328" Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.844854 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l"] Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.933985 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:48 crc kubenswrapper[4818]: E0930 17:01:48.934727 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.434689072 +0000 UTC m=+156.188960888 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:48 crc kubenswrapper[4818]: I0930 17:01:48.988826 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" podStartSLOduration=129.988803647 podStartE2EDuration="2m9.988803647s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:48.983436385 +0000 UTC m=+155.737708211" watchObservedRunningTime="2025-09-30 17:01:48.988803647 +0000 UTC m=+155.743075463" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.010279 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-kvwlg"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.010690 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" podStartSLOduration=130.010677587 podStartE2EDuration="2m10.010677587s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.008399643 +0000 UTC m=+155.762671459" watchObservedRunningTime="2025-09-30 17:01:49.010677587 +0000 UTC m=+155.764949403" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.036387 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" podStartSLOduration=130.036370346 podStartE2EDuration="2m10.036370346s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.033414142 +0000 UTC m=+155.787685958" watchObservedRunningTime="2025-09-30 17:01:49.036370346 +0000 UTC m=+155.790642162" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.036799 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.037234 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.53721918 +0000 UTC m=+156.291490996 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.091844 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ll9ll" podStartSLOduration=130.091827839 podStartE2EDuration="2m10.091827839s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.090750069 +0000 UTC m=+155.845021885" watchObservedRunningTime="2025-09-30 17:01:49.091827839 +0000 UTC m=+155.846099665" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.092090 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j5wfn" podStartSLOduration=130.092083496 podStartE2EDuration="2m10.092083496s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.066039318 +0000 UTC m=+155.820311154" watchObservedRunningTime="2025-09-30 17:01:49.092083496 +0000 UTC m=+155.846355312" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.137736 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.138090 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.638069271 +0000 UTC m=+156.392341087 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.144094 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-w66fc" podStartSLOduration=130.144071211 podStartE2EDuration="2m10.144071211s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.129625761 +0000 UTC m=+155.883897577" watchObservedRunningTime="2025-09-30 17:01:49.144071211 +0000 UTC m=+155.898343027" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.215771 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-j5fc5" podStartSLOduration=130.215753464 podStartE2EDuration="2m10.215753464s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.214128428 +0000 UTC m=+155.968400244" watchObservedRunningTime="2025-09-30 17:01:49.215753464 +0000 UTC m=+155.970025280" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.223057 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-888xq" podStartSLOduration=131.223029021 podStartE2EDuration="2m11.223029021s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.173964429 +0000 UTC m=+155.928236245" watchObservedRunningTime="2025-09-30 17:01:49.223029021 +0000 UTC m=+155.977300837" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.238642 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.238934 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.738908141 +0000 UTC m=+156.493179957 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.254021 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:01:49 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:01:49 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:01:49 crc kubenswrapper[4818]: healthz check failed Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.254339 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.267326 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-5lwgz" podStartSLOduration=6.267309337 podStartE2EDuration="6.267309337s" podCreationTimestamp="2025-09-30 17:01:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.26391587 +0000 UTC m=+156.018187686" watchObservedRunningTime="2025-09-30 17:01:49.267309337 +0000 UTC m=+156.021581153" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.288675 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-tg7pk"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.306004 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-scg48"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.310940 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.330985 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-mfrws"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.331039 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.341834 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.342167 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-09-30 17:01:49.84215391 +0000 UTC m=+156.596425726 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.376253 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.397982 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf"] Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.444998 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.445330 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:49.945318926 +0000 UTC m=+156.699590742 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.526752 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" event={"ID":"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb","Type":"ContainerStarted","Data":"7a8350baf841c850dc78f32b238fd6e7decf60ec491b826484b2e96dc20359d5"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.532910 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-c494l" event={"ID":"c2569fcb-5e0d-486a-8a66-17a5539c2ae0","Type":"ContainerStarted","Data":"1fd402df02c65e061ba9ac5a64d1e25bec2fed92d8a6f716658bd17fd7838925"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.554541 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.555227 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" 
failed. No retries permitted until 2025-09-30 17:01:50.055213703 +0000 UTC m=+156.809485519 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.555945 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" event={"ID":"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3","Type":"ContainerStarted","Data":"f61823c463d85545f1cd1dd298f2e8db160e6d68ae5822f5b473a4d75a8e8589"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.555988 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" event={"ID":"708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3","Type":"ContainerStarted","Data":"51f84d68e74722ada356847a5d4bd5effb9f1329550fbf9f8482ada7df1f83c3"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.556400 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.557587 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" event={"ID":"94a6a5c0-845a-4f60-b111-eb28393fb07c","Type":"ContainerStarted","Data":"c9788da5387ee11f7426151fb32c495398273fd27d7b7d039e54f3e7cba93c24"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.558265 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.569685 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" event={"ID":"41ac1391-f116-4d7b-88dd-d694671283dc","Type":"ContainerStarted","Data":"9d374d272cd690127bb6878063a545be04ae787588a391557042920622bff077"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.571007 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-c494l" podStartSLOduration=6.570990491 podStartE2EDuration="6.570990491s" podCreationTimestamp="2025-09-30 17:01:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.570102426 +0000 UTC m=+156.324374242" watchObservedRunningTime="2025-09-30 17:01:49.570990491 +0000 UTC m=+156.325262307" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.577562 4818 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-fbfxd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.35:6443/healthz\": dial tcp 10.217.0.35:6443: connect: connection refused" start-of-body= Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.577616 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" podUID="94a6a5c0-845a-4f60-b111-eb28393fb07c" containerName="oauth-openshift" probeResult="failure" 
output="Get \"https://10.217.0.35:6443/healthz\": dial tcp 10.217.0.35:6443: connect: connection refused" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.577690 4818 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-9qx5l container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.577707 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" podUID="708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.578256 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-5lwgz" event={"ID":"fe451263-a8c3-4bf2-a4ea-b6c21fac505a","Type":"ContainerStarted","Data":"12088088d215f8d89e4d998650dfc83951403f61b4f0af3409a168d871ef4427"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.598116 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" podStartSLOduration=130.59809928 podStartE2EDuration="2m10.59809928s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.593458578 +0000 UTC m=+156.347730394" watchObservedRunningTime="2025-09-30 17:01:49.59809928 +0000 UTC m=+156.352371096" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.598959 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" event={"ID":"6652f16f-304d-4c4a-84dd-97b68a4aa04b","Type":"ContainerStarted","Data":"a26b25dea10e988a92fa55580a4f9dea9814411db72a37bce317704100d44a37"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.599268 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.600058 4818 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8gcpn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused" start-of-body= Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.600090 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.614114 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-scg48" event={"ID":"a063af3b-6bf1-45bc-a02d-92679099140c","Type":"ContainerStarted","Data":"ab7aa774929a9ee4b3c81088785d2354874ce6e87f8b96610d0590c16a870d1a"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.621470 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" event={"ID":"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9","Type":"ContainerStarted","Data":"828628e6941c839c157319f74dda4df99a70347fe5e7744e6d529528663265ab"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.644303 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" event={"ID":"bc973df4-6c8f-448c-a1f8-609fd7526f3e","Type":"ContainerStarted","Data":"b9524c689905e881d438d38fac1abab676e3895715b5877ac6f9800d602c55bd"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.644342 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" event={"ID":"bc973df4-6c8f-448c-a1f8-609fd7526f3e","Type":"ContainerStarted","Data":"b9a4992f06e316a74342d76fb473b27e6dc131d9f06e1b59694f3fc121b61226"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.658198 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.685726 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" podStartSLOduration=130.685712425 podStartE2EDuration="2m10.685712425s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.683295166 +0000 UTC m=+156.437566982" watchObservedRunningTime="2025-09-30 17:01:49.685712425 +0000 UTC m=+156.439984241" Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.685938 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.185906671 +0000 UTC m=+156.940178477 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.728292 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" podStartSLOduration=131.728254052 podStartE2EDuration="2m11.728254052s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.635526641 +0000 UTC m=+156.389798457" watchObservedRunningTime="2025-09-30 17:01:49.728254052 +0000 UTC m=+156.482525868" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.743272 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" event={"ID":"8484f8fc-ef23-4e59-9461-bdacf24ded18","Type":"ContainerStarted","Data":"33622f20b14fa26c5b13744f34e209766a75357b47f7447daa2e2ce912573d0f"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.744170 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5n9lk" podStartSLOduration=130.744161723 podStartE2EDuration="2m10.744161723s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.744016819 +0000 UTC m=+156.498288635" watchObservedRunningTime="2025-09-30 17:01:49.744161723 +0000 UTC m=+156.498433539" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.749664 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6" event={"ID":"445ee21c-87e4-4bd8-9550-0ed6c16933f7","Type":"ContainerStarted","Data":"16f72c9e67920961025b5dd698aaf1ad0df5f259b251b0c218d1aa107988db5a"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.763384 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.763621 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.263594544 +0000 UTC m=+157.017866360 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.763741 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.764897 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.264888081 +0000 UTC m=+157.019159897 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.776229 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" event={"ID":"589570b2-6222-4f78-80bb-05f8f926957e","Type":"ContainerStarted","Data":"42f049f04278ab03d3bc3bd59aa12a0c5c922fa588c0b0fd703eb81df10efdf3"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.787658 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-fppn6" podStartSLOduration=130.787641796 podStartE2EDuration="2m10.787641796s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.786950167 +0000 UTC m=+156.541221983" watchObservedRunningTime="2025-09-30 17:01:49.787641796 +0000 UTC m=+156.541913612" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.788869 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6fgm2" podStartSLOduration=130.788864041 podStartE2EDuration="2m10.788864041s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.769876982 +0000 UTC m=+156.524148798" watchObservedRunningTime="2025-09-30 17:01:49.788864041 +0000 UTC m=+156.543135857" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.809014 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" 
event={"ID":"4af5c2a5-d804-4267-ac93-799d3699c538","Type":"ContainerStarted","Data":"ebb6a64162afb510e40b645f5d8f95c19682432f5f91be81ec7af3afa72e38c8"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.842237 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" event={"ID":"133ce1d2-cc16-41f9-b136-d197c20847d1","Type":"ContainerStarted","Data":"648b1538bcd9561b6acc14139c40b5e514dc8936c4478da47ec6a2e0eb0f1f10"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.842285 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" event={"ID":"133ce1d2-cc16-41f9-b136-d197c20847d1","Type":"ContainerStarted","Data":"07dbdb693bf988028b3cada0f25ee91876263bf438f68fe1a51f4d0e88dad6c6"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.845877 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" event={"ID":"d4db418f-d110-4268-bf5f-6a1c9e565f84","Type":"ContainerStarted","Data":"a7c9ab5c5c4c8801a9243e3759dddf7aaa05f77db263d6049cbfc2aa7b893917"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.862982 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" event={"ID":"95508e9a-38fe-4c90-83ee-1b87733b07a6","Type":"ContainerStarted","Data":"e6c85d4d98342b9a5fe2856b356c583eb6d3d7eecca11e0decd4669c0688f546"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.863027 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" event={"ID":"95508e9a-38fe-4c90-83ee-1b87733b07a6","Type":"ContainerStarted","Data":"ed6aa4a0e9f4b493f55e6d15bd7f573cb5e179510b218417c1826a755c27a501"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.864373 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.864710 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.364697472 +0000 UTC m=+157.118969288 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.867017 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" event={"ID":"7647753f-a8c4-498f-b876-7553155a6159","Type":"ContainerStarted","Data":"26c247bde87aac4c65765af13326bf386c593e7d7e2349bd159303822d0aaa16"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.870278 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" event={"ID":"4ac2d320-4757-43f3-8dcd-77c514918234","Type":"ContainerStarted","Data":"f8b586905d3fcf70741706c3301282919ee440bb57e3265b030af9a81a58090c"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.871001 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.872746 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" event={"ID":"81b51d61-ed65-4721-8800-9920be8a34e0","Type":"ContainerStarted","Data":"faec7127a8101ce6fcb8cb8423ccefb9895aacf478f3bce163198d5f050e76b8"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.872776 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" event={"ID":"81b51d61-ed65-4721-8800-9920be8a34e0","Type":"ContainerStarted","Data":"e40d8c53110eb302c82c45b5a972fd7c4f9822265e4c70b4c55edc36d51e2df7"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.873014 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.889122 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" event={"ID":"cccf2295-4d3d-410a-b491-86f9d85d264b","Type":"ContainerStarted","Data":"55a25721261fb9377cc61d3e00c40d2eecfe2c949c4c65a40e96c7c68dbd9708"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.889165 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" event={"ID":"cccf2295-4d3d-410a-b491-86f9d85d264b","Type":"ContainerStarted","Data":"50ffe1fea3b6f38b95c2705d0db56eb66b54df5db2390154f52ce764d504365c"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.889771 4818 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-4m5bh container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" start-of-body= Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.889804 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" podUID="81b51d61-ed65-4721-8800-9920be8a34e0" containerName="packageserver" 
probeResult="failure" output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.890061 4818 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-66b7q container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.890081 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" podUID="4ac2d320-4757-43f3-8dcd-77c514918234" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.902718 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" podStartSLOduration=131.90270623 podStartE2EDuration="2m11.90270623s" podCreationTimestamp="2025-09-30 16:59:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.901496816 +0000 UTC m=+156.655768632" watchObservedRunningTime="2025-09-30 17:01:49.90270623 +0000 UTC m=+156.656978046" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.903504 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-q8cn5" podStartSLOduration=130.903498793 podStartE2EDuration="2m10.903498793s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.864039293 +0000 UTC m=+156.618311109" watchObservedRunningTime="2025-09-30 17:01:49.903498793 +0000 UTC m=+156.657770609" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.904455 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" event={"ID":"517b0538-1d7a-495f-af7e-9dcfb71d9cd3","Type":"ContainerStarted","Data":"e17b02c97a4e0ab4ebf6d099b8ece9ea69479009ba77fc2a0f79ea0bd629ccd8"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.944471 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" podStartSLOduration=130.944457364 podStartE2EDuration="2m10.944457364s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:49.944055973 +0000 UTC m=+156.698327789" watchObservedRunningTime="2025-09-30 17:01:49.944457364 +0000 UTC m=+156.698729180" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.947511 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" event={"ID":"6430a370-0a1f-4a1f-b156-037ae868cf87","Type":"ContainerStarted","Data":"2fb9ef9d18599c5a749a8fe4c82a510b2372967f7ecda2da7d36d7c09dc434c9"} Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.965642 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:49 crc kubenswrapper[4818]: E0930 17:01:49.965906 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.465894652 +0000 UTC m=+157.220166468 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.999079 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:49 crc kubenswrapper[4818]: I0930 17:01:49.999160 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.000998 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-g6ck2" podStartSLOduration=131.000982598 podStartE2EDuration="2m11.000982598s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:50.000456233 +0000 UTC m=+156.754728059" watchObservedRunningTime="2025-09-30 17:01:50.000982598 +0000 UTC m=+156.755254414" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.041796 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" podStartSLOduration=131.041777765 podStartE2EDuration="2m11.041777765s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:50.041303101 +0000 UTC m=+156.795574917" watchObservedRunningTime="2025-09-30 17:01:50.041777765 +0000 UTC m=+156.796049581" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.064221 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.069488 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:50 crc kubenswrapper[4818]: E0930 17:01:50.070728 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-09-30 17:01:50.570714586 +0000 UTC m=+157.324986402 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.089744 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" podStartSLOduration=131.089708925 podStartE2EDuration="2m11.089708925s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:50.088594853 +0000 UTC m=+156.842866669" watchObservedRunningTime="2025-09-30 17:01:50.089708925 +0000 UTC m=+156.843980741" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.175467 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:50 crc kubenswrapper[4818]: E0930 17:01:50.175891 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.675877919 +0000 UTC m=+157.430149735 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.239984 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:01:50 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:01:50 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:01:50 crc kubenswrapper[4818]: healthz check failed Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.240049 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.267798 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.267849 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.269911 4818 patch_prober.go:28] interesting pod/apiserver-76f77b778f-qv4dz container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.269966 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz" podUID="95508e9a-38fe-4c90-83ee-1b87733b07a6" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.276190 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:50 crc kubenswrapper[4818]: E0930 17:01:50.276348 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.776316118 +0000 UTC m=+157.530587934 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.377750 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:50 crc kubenswrapper[4818]: E0930 17:01:50.378103 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.878079644 +0000 UTC m=+157.632351460 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.478341 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:50 crc kubenswrapper[4818]: E0930 17:01:50.478481 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:50.978458201 +0000 UTC m=+157.732730017 (durationBeforeRetry 500ms). 
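The Unmount/Mount pair above repeated every ~100ms (only the timestamps advancing) and always for the same reason: the kubelet's CSI volume code looks the driver name up in its table of plugins that have completed registration, and kubevirt.io.hostpath-provisioner is not in that table yet because the hostpath plugin pod has not come up. A minimal Go sketch of that lookup under stated assumptions — driverRegistry and lookupDriver are hypothetical names, not kubelet's actual types:

// Illustrative only: a toy registry producing the
// "driver name ... not found in the list of registered CSI drivers" failure.
package main

import (
	"fmt"
	"sync"
)

// driverRegistry mimics the kubelet-side table of CSI plugins that have
// completed socket registration; mounts fail until the driver appears here.
type driverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> plugin socket path
}

func (r *driverRegistry) lookupDriver(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	sock, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return sock, nil
}

func main() {
	reg := &driverRegistry{drivers: map[string]string{}}
	// The hostpath provisioner has not registered yet, so this fails,
	// matching the MountDevice/TearDownAt errors in the log.
	if _, err := reg.lookupDriver("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("Error:", err)
	}
	// After the plugin registers its socket (path is a stand-in),
	// the same lookup succeeds.
	reg.mu.Lock()
	reg.drivers["kubevirt.io.hostpath-provisioner"] = "/var/lib/kubelet/plugins/csi-hostpath/csi.sock"
	reg.mu.Unlock()
	if sock, err := reg.lookupDriver("kubevirt.io.hostpath-provisioner"); err == nil {
		fmt.Println("registered at:", sock)
	}
}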
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.953946 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" event={"ID":"4af5c2a5-d804-4267-ac93-799d3699c538","Type":"ContainerStarted","Data":"bef3150a075c268a9e3b204705251466502edd450e80f36af7638e13a4dc64d5"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.955381 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" event={"ID":"6430a370-0a1f-4a1f-b156-037ae868cf87","Type":"ContainerStarted","Data":"c95e100e5f31bb387dacc93b3c3e7379e22e066509630b41b05dab5a6e08f849"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.955459 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" event={"ID":"6430a370-0a1f-4a1f-b156-037ae868cf87","Type":"ContainerStarted","Data":"768f51c33a559c750b7313088c84eb7ed91bd63af8899dffd9130987331c24b8"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.956535 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" event={"ID":"41ac1391-f116-4d7b-88dd-d694671283dc","Type":"ContainerStarted","Data":"6041c2482cc1e4d4b4adf40b1b640115bbfa90ed632da156f07e4604d4b31caf"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.957662 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" event={"ID":"6652f16f-304d-4c4a-84dd-97b68a4aa04b","Type":"ContainerStarted","Data":"c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.958218 4818 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8gcpn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused" start-of-body=
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.958302 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.958697 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" event={"ID":"848650c8-e32a-4c8a-8aae-9fbfa62ba6b9","Type":"ContainerStarted","Data":"590cffa393ba890ce8db742e148c02be98fb4b6c7a0cd0e73a38747aa307c413"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.960185 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" event={"ID":"589570b2-6222-4f78-80bb-05f8f926957e","Type":"ContainerStarted","Data":"45d35cffef18cc61464e916e134b17d2b7e11f5f4751a816679c37bd381a6bf6"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.960220 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" event={"ID":"589570b2-6222-4f78-80bb-05f8f926957e","Type":"ContainerStarted","Data":"ee3b5c4ca436b376612c58e9c11b7b0922c7d18f6c884ac540150a56b60a9296"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.960325 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.961470 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-96n5p" event={"ID":"cccf2295-4d3d-410a-b491-86f9d85d264b","Type":"ContainerStarted","Data":"9339e2e819537e3c071ed4291e72720fed516cd7d49c43765f1a1e5f918be83a"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.962857 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" event={"ID":"517b0538-1d7a-495f-af7e-9dcfb71d9cd3","Type":"ContainerStarted","Data":"ebf0ee071eaafff7e9cb386532340ea002d882503b30b413286a8d0540ff6dd6"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.962894 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" event={"ID":"517b0538-1d7a-495f-af7e-9dcfb71d9cd3","Type":"ContainerStarted","Data":"410a5d33de38d02d26e62ef6a00646473d79cff3129a5a4439009292e074ba8d"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.963972 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" event={"ID":"317be33e-ccbd-44f6-aafa-f77a1f2ba6eb","Type":"ContainerStarted","Data":"b7021ed06093a233b56dda7145d66d7d3f121acb7be78134ad511c3c258c2367"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.965247 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-scg48" event={"ID":"a063af3b-6bf1-45bc-a02d-92679099140c","Type":"ContainerStarted","Data":"eef357261f4eab57d73d4af4f10766ce326e90c0a45b48dee29b9c9471809e6a"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.965291 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-scg48" event={"ID":"a063af3b-6bf1-45bc-a02d-92679099140c","Type":"ContainerStarted","Data":"b3263864478723f8d321fd2b2f3b69897bf8cdb2bc1dde95c56399a317ad07b7"}
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.965718 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-scg48"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966254 4818 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-fbfxd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.35:6443/healthz\": dial tcp 10.217.0.35:6443: connect: connection refused" start-of-body=
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966299 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" podUID="94a6a5c0-845a-4f60-b111-eb28393fb07c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.35:6443/healthz\": dial tcp 10.217.0.35:6443: connect: connection refused"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966452 4818 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-4m5bh container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" start-of-body=
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966480 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" podUID="81b51d61-ed65-4721-8800-9920be8a34e0" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966512 4818 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-66b7q container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966544 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q" podUID="4ac2d320-4757-43f3-8dcd-77c514918234" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966645 4818 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-9qx5l container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.966703 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" podUID="708b3cc6-18a6-4a8a-9a37-0a38ee1b0ab3" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.976954 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rkqvp" podStartSLOduration=131.9768972 podStartE2EDuration="2m11.9768972s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:50.973987097 +0000 UTC m=+157.728258913" watchObservedRunningTime="2025-09-30 17:01:50.9768972 +0000 UTC m=+157.731169016"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.982708 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bf4lg"
Sep 30 17:01:50 crc kubenswrapper[4818]: I0930 17:01:50.984233 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:50 crc kubenswrapper[4818]: E0930 17:01:50.984754 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:51.484742233 +0000 UTC m=+158.239014049 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.001802 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" podStartSLOduration=111.001785926 podStartE2EDuration="1m51.001785926s" podCreationTimestamp="2025-09-30 17:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:51.000569141 +0000 UTC m=+157.754840967" watchObservedRunningTime="2025-09-30 17:01:51.001785926 +0000 UTC m=+157.756057742"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.045093 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-tg7pk" podStartSLOduration=132.045075334 podStartE2EDuration="2m12.045075334s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:51.044630611 +0000 UTC m=+157.798902437" watchObservedRunningTime="2025-09-30 17:01:51.045075334 +0000 UTC m=+157.799347140"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.045244 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nvtdf" podStartSLOduration=132.045240159 podStartE2EDuration="2m12.045240159s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:51.020871757 +0000 UTC m=+157.775143573" watchObservedRunningTime="2025-09-30 17:01:51.045240159 +0000 UTC m=+157.799511975"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.057572 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-scg48" podStartSLOduration=8.057555338 podStartE2EDuration="8.057555338s" podCreationTimestamp="2025-09-30 17:01:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:51.05728719 +0000 UTC m=+157.811559006" watchObservedRunningTime="2025-09-30 17:01:51.057555338 +0000 UTC m=+157.811827154"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.073745 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" podStartSLOduration=132.073727697 podStartE2EDuration="2m12.073727697s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:51.072851892 +0000 UTC m=+157.827123708" watchObservedRunningTime="2025-09-30 17:01:51.073727697 +0000 UTC m=+157.827999513"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.080848 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lnpjt"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.085932 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:51 crc kubenswrapper[4818]: E0930 17:01:51.086103 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:51.586078577 +0000 UTC m=+158.340350383 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.086682 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:51 crc kubenswrapper[4818]: E0930 17:01:51.087453 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:51.587441016 +0000 UTC m=+158.341712832 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.093823 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-mfrws" podStartSLOduration=132.093801286 podStartE2EDuration="2m12.093801286s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:51.091944443 +0000 UTC m=+157.846216259" watchObservedRunningTime="2025-09-30 17:01:51.093801286 +0000 UTC m=+157.848073102"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.129764 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xq6hp" podStartSLOduration=132.129746966 podStartE2EDuration="2m12.129746966s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:51.10697123 +0000 UTC m=+157.861243046" watchObservedRunningTime="2025-09-30 17:01:51.129746966 +0000 UTC m=+157.884018782"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.189825 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:51 crc kubenswrapper[4818]: E0930 17:01:51.190061 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:51.690039036 +0000 UTC m=+158.444310852 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.190159 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:51 crc kubenswrapper[4818]: E0930 17:01:51.190429 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:51.690420097 +0000 UTC m=+158.444691913 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.242915 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 30 17:01:51 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld
Sep 30 17:01:51 crc kubenswrapper[4818]: [+]process-running ok
Sep 30 17:01:51 crc kubenswrapper[4818]: healthz check failed
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.242981 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.290718 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:51 crc kubenswrapper[4818]: E0930 17:01:51.290907 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:51.790877656 +0000 UTC m=+158.545149482 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
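Each failure above arms a per-volume retry gate: the "No retries permitted until <time> (durationBeforeRetry 500ms)" lines come from kubelet's nested pending operations tracker, which refuses to re-run an operation on the same volume until the backoff window has elapsed (the real tracker grows the window exponentially; in this log it is still at the initial 500ms, and the reconciler wakes roughly every 100ms, so most wakeups just hit the gate). A toy version of that gating, with hypothetical names (retryGate, try), not kubelet's actual types:

// Illustrative only: the gating idea behind "No retries permitted until ...".
package main

import (
	"errors"
	"fmt"
	"time"
)

type retryGate struct {
	notBefore time.Time     // gate stays closed until this instant
	backoff   time.Duration // fixed here; exponential in the real tracker
}

// try runs op unless the gate is still closed from the previous failure.
func (g *retryGate) try(op func() error) error {
	if now := time.Now(); now.Before(g.notBefore) {
		return fmt.Errorf("no retries permitted until %s", g.notBefore.UTC())
	}
	if err := op(); err != nil {
		g.notBefore = time.Now().Add(g.backoff) // close the gate for the backoff window
		return err
	}
	return nil
}

func main() {
	g := &retryGate{backoff: 500 * time.Millisecond}
	mount := func() error {
		return errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
	}
	for i := 0; i < 3; i++ {
		if err := g.try(mount); err != nil {
			fmt.Println("attempt", i, "->", err)
		}
		time.Sleep(200 * time.Millisecond) // reconciler wakes before the gate reopens
	}
}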
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.897535 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:51 crc kubenswrapper[4818]: E0930 17:01:51.897888 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:52.397876324 +0000 UTC m=+159.152148140 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.973345 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" event={"ID":"d4db418f-d110-4268-bf5f-6a1c9e565f84","Type":"ContainerStarted","Data":"959100a7862d245cec3c4e1f8e1daecddfd0ce2bd5aa195238e874c675616520"}
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.974104 4818 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8gcpn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused" start-of-body=
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.974149 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.977025 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-66b7q"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.988259 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:01:51 crc kubenswrapper[4818]: I0930 17:01:51.998333 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:51 crc kubenswrapper[4818]: E0930 17:01:51.998523 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:52.498492918 +0000 UTC m=+159.252764754 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.000965 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.001549 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:52.501538424 +0000 UTC m=+159.255810240 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.101766 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.102009 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:52.601985934 +0000 UTC m=+159.356257750 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.242214 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 30 17:01:52 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld
Sep 30 17:01:52 crc kubenswrapper[4818]: [+]process-running ok
Sep 30 17:01:52 crc kubenswrapper[4818]: healthz check failed
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.242269 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.305013 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.305306 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:52.80529422 +0000 UTC m=+159.559566036 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.406407 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.406535 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:52.906513461 +0000 UTC m=+159.660785277 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.595727 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.596001 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.609535 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f"
Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.609948 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.109935921 +0000 UTC m=+159.864207737 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.710831 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.711150 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.211132801 +0000 UTC m=+159.965404617 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
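The Liveness and Readiness failures interleaved through this window all report "connect: connection refused": the kubelet's HTTP prober could not even open the TCP connection, which means the container process is not listening on the port yet rather than answering unhealthily (contrast the router's startup probe above, which did connect and got a 500). A minimal sketch of such a probe; the URL is the machine-config-daemon health endpoint from the record above, and the helper name probe is hypothetical:

// Illustrative only: an HTTP health probe in the spirit of kubelet's prober.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	// 2xx/3xx counts as success; anything else fails the probe.
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}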
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.812458 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.812792 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.312777444 +0000 UTC m=+160.067049260 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:52 crc kubenswrapper[4818]: I0930 17:01:52.914217 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:52 crc kubenswrapper[4818]: E0930 17:01:52.914470 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.414455269 +0000 UTC m=+160.168727075 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.015417 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.015675 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.515664369 +0000 UTC m=+160.269936175 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.116359 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.116514 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.616486539 +0000 UTC m=+160.370758355 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.116560 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.116968 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.616960703 +0000 UTC m=+160.371232509 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.217533 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.217746 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.717718861 +0000 UTC m=+160.471990677 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.217833 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.218327 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.718319318 +0000 UTC m=+160.472591134 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.242246 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-28tlv"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.243409 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.245988 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.246782 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:01:53 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:01:53 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:01:53 crc kubenswrapper[4818]: healthz check failed Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.246822 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.265612 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-28tlv"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.318699 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.318871 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.818846019 +0000 UTC m=+160.573117835 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.318961 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-utilities\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.319054 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkg55\" (UniqueName: \"kubernetes.io/projected/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-kube-api-access-tkg55\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.319110 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.319292 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-catalog-content\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.319400 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.819392225 +0000 UTC m=+160.573664041 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.420075 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.420254 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.920227295 +0000 UTC m=+160.674499111 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.420307 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-catalog-content\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.420347 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-utilities\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.420382 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkg55\" (UniqueName: \"kubernetes.io/projected/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-kube-api-access-tkg55\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.420407 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.420707 4818 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:53.920695208 +0000 UTC m=+160.674967024 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.420805 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-catalog-content\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.420871 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-utilities\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.438769 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkg55\" (UniqueName: \"kubernetes.io/projected/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-kube-api-access-tkg55\") pod \"community-operators-28tlv\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.446314 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-44qds"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.448211 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.454540 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.457494 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-44qds"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.521423 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.521595 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.02157164 +0000 UTC m=+160.775843456 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.521746 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.521802 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcl4q\" (UniqueName: \"kubernetes.io/projected/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-kube-api-access-jcl4q\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.521965 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-utilities\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.522049 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-catalog-content\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.522603 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.022591659 +0000 UTC m=+160.776863585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.569269 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.623471 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.623593 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-utilities\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.623645 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.123608154 +0000 UTC m=+160.877880020 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.623900 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-catalog-content\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.624161 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.624215 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcl4q\" (UniqueName: \"kubernetes.io/projected/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-kube-api-access-jcl4q\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.624243 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-utilities\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.624482 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-catalog-content\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.624583 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.124566721 +0000 UTC m=+160.878838537 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.643000 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mmw5q"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.644067 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.678996 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmw5q"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.693040 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcl4q\" (UniqueName: \"kubernetes.io/projected/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-kube-api-access-jcl4q\") pod \"certified-operators-44qds\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.725768 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.726105 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-utilities\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.726180 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-catalog-content\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.726340 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f5fp\" (UniqueName: \"kubernetes.io/projected/ea56cc9d-63e6-4e68-8f01-d93c321c6854-kube-api-access-5f5fp\") pod \"community-operators-mmw5q\" (UID: 
\"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.726512 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.226472132 +0000 UTC m=+160.980743958 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.781393 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.828602 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.828700 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f5fp\" (UniqueName: \"kubernetes.io/projected/ea56cc9d-63e6-4e68-8f01-d93c321c6854-kube-api-access-5f5fp\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.828749 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-utilities\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.828784 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-catalog-content\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.829463 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-catalog-content\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.830683 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.330547124 +0000 UTC m=+161.084818940 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.831999 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-utilities\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.843869 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qv9v9"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.845366 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.853321 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qv9v9"] Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.856725 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f5fp\" (UniqueName: \"kubernetes.io/projected/ea56cc9d-63e6-4e68-8f01-d93c321c6854-kube-api-access-5f5fp\") pod \"community-operators-mmw5q\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") " pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.930266 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.930519 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-utilities\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.930540 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.43051886 +0000 UTC m=+161.184790676 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.930568 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.930629 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzxmj\" (UniqueName: \"kubernetes.io/projected/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-kube-api-access-wzxmj\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.930660 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-catalog-content\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:53 crc kubenswrapper[4818]: E0930 17:01:53.930840 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.430832869 +0000 UTC m=+161.185104685 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.946141 4818 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Sep 30 17:01:53 crc kubenswrapper[4818]: I0930 17:01:53.999397 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mmw5q" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.019791 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-28tlv"] Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.029466 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" event={"ID":"d4db418f-d110-4268-bf5f-6a1c9e565f84","Type":"ContainerStarted","Data":"411dd855498cfb2ddfbac8a4f41f4595cc8f3a52993904e770d7411baa6f9207"} Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.029507 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" event={"ID":"d4db418f-d110-4268-bf5f-6a1c9e565f84","Type":"ContainerStarted","Data":"60952c12c26352ae461deb0b729d2dc74579df72dbe7c0149faa33cd0999ae66"} Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.031289 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:54 crc kubenswrapper[4818]: E0930 17:01:54.031412 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.531386771 +0000 UTC m=+161.285658587 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.031492 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-utilities\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.031520 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.031585 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzxmj\" (UniqueName: \"kubernetes.io/projected/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-kube-api-access-wzxmj\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.031625 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-catalog-content\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.032011 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-catalog-content\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:54 crc kubenswrapper[4818]: E0930 17:01:54.032064 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.53205312 +0000 UTC m=+161.286325046 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.032566 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-utilities\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:54 crc kubenswrapper[4818]: W0930 17:01:54.032818 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79f4e678_c1bf_4bd3_b2b7_cf5efcd6df2e.slice/crio-ed842e6927800d2079bb5a4719ccd3940c4c1bb672662ed35b440792e2cc4b83 WatchSource:0}: Error finding container ed842e6927800d2079bb5a4719ccd3940c4c1bb672662ed35b440792e2cc4b83: Status 404 returned error can't find the container with id ed842e6927800d2079bb5a4719ccd3940c4c1bb672662ed35b440792e2cc4b83 Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.049190 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzxmj\" (UniqueName: \"kubernetes.io/projected/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-kube-api-access-wzxmj\") pod \"certified-operators-qv9v9\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") " pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.069386 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-44qds"] Sep 30 17:01:54 crc kubenswrapper[4818]: W0930 17:01:54.077593 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda59d1ab7_3891_4385_9f6e_8ca1a0bdf204.slice/crio-19383766f32f8b2f4e07fd5ce53be18e35b73429e9f7b18966d3e058cf90da83 WatchSource:0}: Error finding container 19383766f32f8b2f4e07fd5ce53be18e35b73429e9f7b18966d3e058cf90da83: Status 404 returned error can't find the container with id 19383766f32f8b2f4e07fd5ce53be18e35b73429e9f7b18966d3e058cf90da83 Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.132167 4818 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:54 crc kubenswrapper[4818]: E0930 17:01:54.132589 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.632573811 +0000 UTC m=+161.386845627 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.172787 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qv9v9" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.220735 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmw5q"] Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.232910 4818 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-09-30T17:01:53.946168594Z","Handler":null,"Name":""} Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.233870 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:54 crc kubenswrapper[4818]: E0930 17:01:54.235197 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-30 17:01:54.735183092 +0000 UTC m=+161.489454908 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kml8f" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.244416 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:01:54 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:01:54 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:01:54 crc kubenswrapper[4818]: healthz check failed Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.244467 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.245467 4818 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.245496 4818 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.335304 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.339049 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.382876 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qv9v9"] Sep 30 17:01:54 crc kubenswrapper[4818]: W0930 17:01:54.390349 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e171b72_1e48_4bf2_8c9d_5a2ce38f4588.slice/crio-9137c438098995b0efd229acb91bd8670b6974a5eb4042b71056344d924973ad WatchSource:0}: Error finding container 9137c438098995b0efd229acb91bd8670b6974a5eb4042b71056344d924973ad: Status 404 returned error can't find the container with id 9137c438098995b0efd229acb91bd8670b6974a5eb4042b71056344d924973ad Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.436892 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.441185 4818 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.441230 4818 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.449081 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.449797 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.458978 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.459288 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.466520 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.480056 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kml8f\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.608285 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.640533 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.640622 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.741761 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.742117 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.741867 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.770685 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.813636 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kml8f"] Sep 30 17:01:54 crc kubenswrapper[4818]: W0930 17:01:54.830408 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e5fa0dd_8926_45e0_a31d_e6db6f1bdebb.slice/crio-cec52efde7303d349c8fd497103eb183b125578020f9ec34f8dda83fe6decb98 WatchSource:0}: Error finding container cec52efde7303d349c8fd497103eb183b125578020f9ec34f8dda83fe6decb98: Status 404 returned error can't find the container with id cec52efde7303d349c8fd497103eb183b125578020f9ec34f8dda83fe6decb98 Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.934509 4818 patch_prober.go:28] interesting pod/downloads-7954f5f757-7r27t container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 30 
17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.934577 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7r27t" podUID="8b00b5de-92e6-45ef-bd66-2f06b0b0e249" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.934905 4818 patch_prober.go:28] interesting pod/downloads-7954f5f757-7r27t container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.934972 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7r27t" podUID="8b00b5de-92e6-45ef-bd66-2f06b0b0e249" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.973278 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.973318 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.974860 4818 patch_prober.go:28] interesting pod/console-f9d7485db-wr9kd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.974899 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-wr9kd" podUID="4709760d-9993-42d3-97c3-bd5470b9c8ab" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Sep 30 17:01:54 crc kubenswrapper[4818]: I0930 17:01:54.977405 4818 util.go:30] "No sandbox for pod can be found. 
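The Liveness/Readiness failures above are plain HTTP GETs refused by the container. A standalone Go sketch approximating an httpGet probe as the prober lines report it (any 2xx/3xx status counts as success); this mirrors the behavior, it is not the kubelet's actual prober code:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // probeHTTP performs one httpGet-style probe: 2xx/3xx is success,
    // anything else (or a transport error such as "connection refused")
    // is a failure, matching the prober.go lines above.
    func probeHTTP(url string, timeout time.Duration) error {
    	client := &http.Client{Timeout: timeout}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err // e.g. dial tcp 10.217.0.12:8080: connect: connection refused
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
    		return nil
    	}
    	return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
    }

    func main() {
    	if err := probeHTTP("http://10.217.0.12:8080/", time.Second); err != nil {
    		fmt.Println("Probe failed:", err)
    	}
    }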
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.028858 4818 generic.go:334] "Generic (PLEG): container finished" podID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerID="91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040" exitCode=0 Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.028969 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-44qds" event={"ID":"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204","Type":"ContainerDied","Data":"91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.028996 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-44qds" event={"ID":"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204","Type":"ContainerStarted","Data":"19383766f32f8b2f4e07fd5ce53be18e35b73429e9f7b18966d3e058cf90da83"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.031243 4818 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.051548 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" event={"ID":"d4db418f-d110-4268-bf5f-6a1c9e565f84","Type":"ContainerStarted","Data":"5fe976a3bc78c049556f4bc622608d8eb396cdd5964c29d7f8bdc40f3d31f8bf"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.055691 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerID="c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea" exitCode=0 Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.055857 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qv9v9" event={"ID":"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588","Type":"ContainerDied","Data":"c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.056115 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qv9v9" event={"ID":"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588","Type":"ContainerStarted","Data":"9137c438098995b0efd229acb91bd8670b6974a5eb4042b71056344d924973ad"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.072356 4818 generic.go:334] "Generic (PLEG): container finished" podID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerID="a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4" exitCode=0 Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.072491 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28tlv" event={"ID":"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e","Type":"ContainerDied","Data":"a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.072522 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28tlv" event={"ID":"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e","Type":"ContainerStarted","Data":"ed842e6927800d2079bb5a4719ccd3940c4c1bb672662ed35b440792e2cc4b83"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.093770 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-kvwlg" podStartSLOduration=12.093749145 podStartE2EDuration="12.093749145s" 
podCreationTimestamp="2025-09-30 17:01:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:55.079156161 +0000 UTC m=+161.833427987" watchObservedRunningTime="2025-09-30 17:01:55.093749145 +0000 UTC m=+161.848020961" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.113388 4818 generic.go:334] "Generic (PLEG): container finished" podID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerID="fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8" exitCode=0 Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.113499 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmw5q" event={"ID":"ea56cc9d-63e6-4e68-8f01-d93c321c6854","Type":"ContainerDied","Data":"fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.113544 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmw5q" event={"ID":"ea56cc9d-63e6-4e68-8f01-d93c321c6854","Type":"ContainerStarted","Data":"6a13bc0b88cbbb959abe4ffeabaf1aff2e43078126fa8ee835013cc8afebfc0d"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.120103 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" event={"ID":"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb","Type":"ContainerStarted","Data":"f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.120147 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" event={"ID":"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb","Type":"ContainerStarted","Data":"cec52efde7303d349c8fd497103eb183b125578020f9ec34f8dda83fe6decb98"} Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.120723 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.218627 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" podStartSLOduration=136.218609297 podStartE2EDuration="2m16.218609297s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:55.21731556 +0000 UTC m=+161.971587376" watchObservedRunningTime="2025-09-30 17:01:55.218609297 +0000 UTC m=+161.972881113" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.242048 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:01:55 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:01:55 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:01:55 crc kubenswrapper[4818]: healthz check failed Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.242097 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.277816 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-qv4dz"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.440313 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d4v8q"]
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.441225 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.443575 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.452338 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d4v8q"]
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.482402 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.559118 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8srpc\" (UniqueName: \"kubernetes.io/projected/f9458d30-6f34-4e65-94a3-dc5787773b24-kube-api-access-8srpc\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.559193 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-catalog-content\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.559239 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-utilities\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.660614 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-catalog-content\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.660677 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-utilities\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.660713 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8srpc\" (UniqueName: \"kubernetes.io/projected/f9458d30-6f34-4e65-94a3-dc5787773b24-kube-api-access-8srpc\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.661289 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-catalog-content\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.661593 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-utilities\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.693806 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8srpc\" (UniqueName: \"kubernetes.io/projected/f9458d30-6f34-4e65-94a3-dc5787773b24-kube-api-access-8srpc\") pod \"redhat-marketplace-d4v8q\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.754081 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d4v8q"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.848884 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-48kp2"]
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.850462 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48kp2"
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.863235 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-48kp2"]
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.881245 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.882257 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.888695 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.888945 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.898289 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.965596 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-catalog-content\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.965694 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-utilities\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.965733 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrls2\" (UniqueName: \"kubernetes.io/projected/0e764b4c-fdff-4e32-bdec-17a1d80acb31-kube-api-access-jrls2\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:55 crc kubenswrapper[4818]: I0930 17:01:55.981367 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d4v8q"] Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.033644 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.066458 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrls2\" (UniqueName: \"kubernetes.io/projected/0e764b4c-fdff-4e32-bdec-17a1d80acb31-kube-api-access-jrls2\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.066511 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3515ffdd-13b1-4957-b2c3-87115f35313e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.066565 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-catalog-content\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.066598 4818 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3515ffdd-13b1-4957-b2c3-87115f35313e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.066624 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-utilities\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.067007 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-utilities\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.067237 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-catalog-content\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.087808 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrls2\" (UniqueName: \"kubernetes.io/projected/0e764b4c-fdff-4e32-bdec-17a1d80acb31-kube-api-access-jrls2\") pod \"redhat-marketplace-48kp2\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.126725 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"828db2f6-cb32-4e0c-85df-164eddd8e1ed","Type":"ContainerStarted","Data":"75652dc6181a4184191f1cf7b972caf1e526fc19a2404a9f8393446498f1fafc"} Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.126790 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"828db2f6-cb32-4e0c-85df-164eddd8e1ed","Type":"ContainerStarted","Data":"afbc422f268730671d094e5719106c5f56b33e85e031503b37d432a3569bf4c1"} Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.135427 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerStarted","Data":"0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c"} Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.135486 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerStarted","Data":"8379f1140c78b33f549e4e22d2e5df78c7a7a665646fd52bbc7f3f71dd7b0d7d"} Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.143397 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.143382327 podStartE2EDuration="2.143382327s" podCreationTimestamp="2025-09-30 17:01:54 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:56.140805904 +0000 UTC m=+162.895077730" watchObservedRunningTime="2025-09-30 17:01:56.143382327 +0000 UTC m=+162.897654143" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.167375 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3515ffdd-13b1-4957-b2c3-87115f35313e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.167688 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3515ffdd-13b1-4957-b2c3-87115f35313e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.167506 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3515ffdd-13b1-4957-b2c3-87115f35313e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.182943 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3515ffdd-13b1-4957-b2c3-87115f35313e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.187836 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.210339 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.237232 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.239573 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:01:56 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:01:56 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:01:56 crc kubenswrapper[4818]: healthz check failed Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.239625 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.442636 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pjn54"] Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.443851 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.448322 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.452131 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjn54"] Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.519253 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Sep 30 17:01:56 crc kubenswrapper[4818]: W0930 17:01:56.531593 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod3515ffdd_13b1_4957_b2c3_87115f35313e.slice/crio-2ea612e05074ab9bebe84d07bcd3f9b8e827799eb83ff767216bd779bf388803 WatchSource:0}: Error finding container 2ea612e05074ab9bebe84d07bcd3f9b8e827799eb83ff767216bd779bf388803: Status 404 returned error can't find the container with id 2ea612e05074ab9bebe84d07bcd3f9b8e827799eb83ff767216bd779bf388803 Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.577005 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwhkv\" (UniqueName: \"kubernetes.io/projected/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-kube-api-access-dwhkv\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.577077 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-utilities\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.577094 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-catalog-content\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.643255 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-48kp2"] Sep 30 17:01:56 crc kubenswrapper[4818]: W0930 17:01:56.652658 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e764b4c_fdff_4e32_bdec_17a1d80acb31.slice/crio-4d111d8b825ddfa164b86d4341596dd1949accfa9f8426033ac587fb34618346 WatchSource:0}: Error finding container 4d111d8b825ddfa164b86d4341596dd1949accfa9f8426033ac587fb34618346: Status 404 returned error can't find the container with id 4d111d8b825ddfa164b86d4341596dd1949accfa9f8426033ac587fb34618346 Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.678377 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwhkv\" (UniqueName: \"kubernetes.io/projected/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-kube-api-access-dwhkv\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.678438 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-utilities\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.678456 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-catalog-content\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.678825 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-catalog-content\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.679125 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-utilities\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.699872 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwhkv\" (UniqueName: \"kubernetes.io/projected/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-kube-api-access-dwhkv\") pod \"redhat-operators-pjn54\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.768813 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.839748 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xgbw4"] Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.840724 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.851871 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xgbw4"] Sep 30 17:01:56 crc kubenswrapper[4818]: I0930 17:01:56.973206 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.001519 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-catalog-content\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.001573 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjhp9\" (UniqueName: \"kubernetes.io/projected/0a994ace-967b-4a52-9289-6b59bb9b699b-kube-api-access-fjhp9\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.001608 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-utilities\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.019718 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9qx5l" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.069521 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4m5bh" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.105206 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-catalog-content\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.106043 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-catalog-content\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.107960 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjhp9\" (UniqueName: \"kubernetes.io/projected/0a994ace-967b-4a52-9289-6b59bb9b699b-kube-api-access-fjhp9\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.108021 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-utilities\") pod 
\"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.109188 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-utilities\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.138198 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjhp9\" (UniqueName: \"kubernetes.io/projected/0a994ace-967b-4a52-9289-6b59bb9b699b-kube-api-access-fjhp9\") pod \"redhat-operators-xgbw4\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.150731 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3515ffdd-13b1-4957-b2c3-87115f35313e","Type":"ContainerStarted","Data":"2ea612e05074ab9bebe84d07bcd3f9b8e827799eb83ff767216bd779bf388803"} Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.153241 4818 generic.go:334] "Generic (PLEG): container finished" podID="828db2f6-cb32-4e0c-85df-164eddd8e1ed" containerID="75652dc6181a4184191f1cf7b972caf1e526fc19a2404a9f8393446498f1fafc" exitCode=0 Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.153386 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"828db2f6-cb32-4e0c-85df-164eddd8e1ed","Type":"ContainerDied","Data":"75652dc6181a4184191f1cf7b972caf1e526fc19a2404a9f8393446498f1fafc"} Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.165762 4818 generic.go:334] "Generic (PLEG): container finished" podID="41ac1391-f116-4d7b-88dd-d694671283dc" containerID="6041c2482cc1e4d4b4adf40b1b640115bbfa90ed632da156f07e4604d4b31caf" exitCode=0 Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.165899 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" event={"ID":"41ac1391-f116-4d7b-88dd-d694671283dc","Type":"ContainerDied","Data":"6041c2482cc1e4d4b4adf40b1b640115bbfa90ed632da156f07e4604d4b31caf"} Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.176439 4818 generic.go:334] "Generic (PLEG): container finished" podID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerID="0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c" exitCode=0 Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.176591 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerDied","Data":"0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c"} Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.183125 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjn54"] Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.183262 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48kp2" event={"ID":"0e764b4c-fdff-4e32-bdec-17a1d80acb31","Type":"ContainerStarted","Data":"4d111d8b825ddfa164b86d4341596dd1949accfa9f8426033ac587fb34618346"} Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.211137 4818 util.go:30] "No 
Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.243421 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 30 17:01:57 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld
Sep 30 17:01:57 crc kubenswrapper[4818]: [+]process-running ok
Sep 30 17:01:57 crc kubenswrapper[4818]: healthz check failed
Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.243491 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 30 17:01:57 crc kubenswrapper[4818]: I0930 17:01:57.611462 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xgbw4"]
Sep 30 17:01:57 crc kubenswrapper[4818]: W0930 17:01:57.627202 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a994ace_967b_4a52_9289_6b59bb9b699b.slice/crio-fb4219c6b4405a0cd96d348e239a0072162472e1115477f4843f14c6bd1a8731 WatchSource:0}: Error finding container fb4219c6b4405a0cd96d348e239a0072162472e1115477f4843f14c6bd1a8731: Status 404 returned error can't find the container with id fb4219c6b4405a0cd96d348e239a0072162472e1115477f4843f14c6bd1a8731
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.205402 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3515ffdd-13b1-4957-b2c3-87115f35313e","Type":"ContainerStarted","Data":"458aee59a1ba720e9962240719cea3eefa1025113e35c2e296e4dbf5ec387ec3"}
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.214800 4818 generic.go:334] "Generic (PLEG): container finished" podID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerID="0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03" exitCode=0
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.214866 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjn54" event={"ID":"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6","Type":"ContainerDied","Data":"0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03"}
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.214889 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjn54" event={"ID":"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6","Type":"ContainerStarted","Data":"345be133506c2d6a1e7100bb0dce960f9d716d8041ccbbd40530cba767fb6446"}
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.219576 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.219561139 podStartE2EDuration="3.219561139s" podCreationTimestamp="2025-09-30 17:01:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:01:58.216890463 +0000 UTC m=+164.971162279" watchObservedRunningTime="2025-09-30 17:01:58.219561139 +0000 UTC m=+164.973832955"
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.224492 4818 generic.go:334] "Generic (PLEG): container finished" podID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerID="48772190a0dddac2d0676c673f4ca962e48c16e9e4a53d9c908fd48d006d6bfe" exitCode=0
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.224572 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48kp2" event={"ID":"0e764b4c-fdff-4e32-bdec-17a1d80acb31","Type":"ContainerDied","Data":"48772190a0dddac2d0676c673f4ca962e48c16e9e4a53d9c908fd48d006d6bfe"}
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.229229 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerStarted","Data":"a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb"}
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.229259 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerStarted","Data":"fb4219c6b4405a0cd96d348e239a0072162472e1115477f4843f14c6bd1a8731"}
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.240491 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 30 17:01:58 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld
Sep 30 17:01:58 crc kubenswrapper[4818]: [+]process-running ok
Sep 30 17:01:58 crc kubenswrapper[4818]: healthz check failed
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.240941 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.470620 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.556345 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kubelet-dir\") pod \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") "
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.556491 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kube-api-access\") pod \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\" (UID: \"828db2f6-cb32-4e0c-85df-164eddd8e1ed\") "
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.560034 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "828db2f6-cb32-4e0c-85df-164eddd8e1ed" (UID: "828db2f6-cb32-4e0c-85df-164eddd8e1ed"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.563147 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "828db2f6-cb32-4e0c-85df-164eddd8e1ed" (UID: "828db2f6-cb32-4e0c-85df-164eddd8e1ed"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.593470 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl"
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.657730 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41ac1391-f116-4d7b-88dd-d694671283dc-config-volume\") pod \"41ac1391-f116-4d7b-88dd-d694671283dc\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") "
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.657786 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41ac1391-f116-4d7b-88dd-d694671283dc-secret-volume\") pod \"41ac1391-f116-4d7b-88dd-d694671283dc\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") "
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.658467 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ac1391-f116-4d7b-88dd-d694671283dc-config-volume" (OuterVolumeSpecName: "config-volume") pod "41ac1391-f116-4d7b-88dd-d694671283dc" (UID: "41ac1391-f116-4d7b-88dd-d694671283dc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.661098 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpbt5\" (UniqueName: \"kubernetes.io/projected/41ac1391-f116-4d7b-88dd-d694671283dc-kube-api-access-kpbt5\") pod \"41ac1391-f116-4d7b-88dd-d694671283dc\" (UID: \"41ac1391-f116-4d7b-88dd-d694671283dc\") "
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.661448 4818 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41ac1391-f116-4d7b-88dd-d694671283dc-config-volume\") on node \"crc\" DevicePath \"\""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.661473 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kube-api-access\") on node \"crc\" DevicePath \"\""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.661486 4818 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/828db2f6-cb32-4e0c-85df-164eddd8e1ed-kubelet-dir\") on node \"crc\" DevicePath \"\""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.675861 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41ac1391-f116-4d7b-88dd-d694671283dc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "41ac1391-f116-4d7b-88dd-d694671283dc" (UID: "41ac1391-f116-4d7b-88dd-d694671283dc"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.676006 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41ac1391-f116-4d7b-88dd-d694671283dc-kube-api-access-kpbt5" (OuterVolumeSpecName: "kube-api-access-kpbt5") pod "41ac1391-f116-4d7b-88dd-d694671283dc" (UID: "41ac1391-f116-4d7b-88dd-d694671283dc"). InnerVolumeSpecName "kube-api-access-kpbt5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.762510 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpbt5\" (UniqueName: \"kubernetes.io/projected/41ac1391-f116-4d7b-88dd-d694671283dc-kube-api-access-kpbt5\") on node \"crc\" DevicePath \"\""
Sep 30 17:01:58 crc kubenswrapper[4818]: I0930 17:01:58.762585 4818 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41ac1391-f116-4d7b-88dd-d694671283dc-secret-volume\") on node \"crc\" DevicePath \"\""
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.236961 4818 generic.go:334] "Generic (PLEG): container finished" podID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerID="a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb" exitCode=0
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.237047 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerDied","Data":"a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb"}
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.239679 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 30 17:01:59 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld
Sep 30 17:01:59 crc kubenswrapper[4818]: [+]process-running ok
Sep 30 17:01:59 crc kubenswrapper[4818]: healthz check failed
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.239723 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.242137 4818 generic.go:334] "Generic (PLEG): container finished" podID="3515ffdd-13b1-4957-b2c3-87115f35313e" containerID="458aee59a1ba720e9962240719cea3eefa1025113e35c2e296e4dbf5ec387ec3" exitCode=0
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.242192 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3515ffdd-13b1-4957-b2c3-87115f35313e","Type":"ContainerDied","Data":"458aee59a1ba720e9962240719cea3eefa1025113e35c2e296e4dbf5ec387ec3"}
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.244236 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"828db2f6-cb32-4e0c-85df-164eddd8e1ed","Type":"ContainerDied","Data":"afbc422f268730671d094e5719106c5f56b33e85e031503b37d432a3569bf4c1"}
Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.244274 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afbc422f268730671d094e5719106c5f56b33e85e031503b37d432a3569bf4c1"
containerID="afbc422f268730671d094e5719106c5f56b33e85e031503b37d432a3569bf4c1" Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.244328 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.262200 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.267047 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320860-4bffl" event={"ID":"41ac1391-f116-4d7b-88dd-d694671283dc","Type":"ContainerDied","Data":"9d374d272cd690127bb6878063a545be04ae787588a391557042920622bff077"} Sep 30 17:01:59 crc kubenswrapper[4818]: I0930 17:01:59.267103 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d374d272cd690127bb6878063a545be04ae787588a391557042920622bff077" Sep 30 17:02:00 crc kubenswrapper[4818]: I0930 17:02:00.238798 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:02:00 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:02:00 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:02:00 crc kubenswrapper[4818]: healthz check failed Sep 30 17:02:00 crc kubenswrapper[4818]: I0930 17:02:00.239131 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:02:01 crc kubenswrapper[4818]: I0930 17:02:01.246165 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:02:01 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:02:01 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:02:01 crc kubenswrapper[4818]: healthz check failed Sep 30 17:02:01 crc kubenswrapper[4818]: I0930 17:02:01.246231 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:02:02 crc kubenswrapper[4818]: I0930 17:02:02.031849 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-scg48" Sep 30 17:02:02 crc kubenswrapper[4818]: I0930 17:02:02.240768 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:02:02 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:02:02 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:02:02 crc kubenswrapper[4818]: healthz check failed Sep 30 17:02:02 crc kubenswrapper[4818]: I0930 17:02:02.240830 4818 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:02:02 crc kubenswrapper[4818]: I0930 17:02:02.521559 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:02:02 crc kubenswrapper[4818]: I0930 17:02:02.526551 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3712d08f-58c2-4fff-9d9f-443ba37fc9c0-metrics-certs\") pod \"network-metrics-daemon-4p4hg\" (UID: \"3712d08f-58c2-4fff-9d9f-443ba37fc9c0\") " pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:02:02 crc kubenswrapper[4818]: I0930 17:02:02.656793 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4p4hg" Sep 30 17:02:03 crc kubenswrapper[4818]: I0930 17:02:03.240377 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:02:03 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:02:03 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:02:03 crc kubenswrapper[4818]: healthz check failed Sep 30 17:02:03 crc kubenswrapper[4818]: I0930 17:02:03.240448 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:02:04 crc kubenswrapper[4818]: I0930 17:02:04.242027 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:02:04 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:02:04 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:02:04 crc kubenswrapper[4818]: healthz check failed Sep 30 17:02:04 crc kubenswrapper[4818]: I0930 17:02:04.242093 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:02:04 crc kubenswrapper[4818]: I0930 17:02:04.934393 4818 patch_prober.go:28] interesting pod/downloads-7954f5f757-7r27t container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 30 17:02:04 crc kubenswrapper[4818]: I0930 17:02:04.934689 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7r27t" podUID="8b00b5de-92e6-45ef-bd66-2f06b0b0e249" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 30 17:02:04 crc 
kubenswrapper[4818]: I0930 17:02:04.934836 4818 patch_prober.go:28] interesting pod/downloads-7954f5f757-7r27t container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 30 17:02:04 crc kubenswrapper[4818]: I0930 17:02:04.934896 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7r27t" podUID="8b00b5de-92e6-45ef-bd66-2f06b0b0e249" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 30 17:02:04 crc kubenswrapper[4818]: I0930 17:02:04.973282 4818 patch_prober.go:28] interesting pod/console-f9d7485db-wr9kd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Sep 30 17:02:04 crc kubenswrapper[4818]: I0930 17:02:04.973350 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-wr9kd" podUID="4709760d-9993-42d3-97c3-bd5470b9c8ab" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.227660 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.242883 4818 patch_prober.go:28] interesting pod/router-default-5444994796-gnmfp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 30 17:02:05 crc kubenswrapper[4818]: [-]has-synced failed: reason withheld Sep 30 17:02:05 crc kubenswrapper[4818]: [+]process-running ok Sep 30 17:02:05 crc kubenswrapper[4818]: healthz check failed Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.243046 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gnmfp" podUID="d8970c17-d95f-454b-ac56-db24223ef2fc" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.261478 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3515ffdd-13b1-4957-b2c3-87115f35313e-kubelet-dir\") pod \"3515ffdd-13b1-4957-b2c3-87115f35313e\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.261558 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3515ffdd-13b1-4957-b2c3-87115f35313e-kube-api-access\") pod \"3515ffdd-13b1-4957-b2c3-87115f35313e\" (UID: \"3515ffdd-13b1-4957-b2c3-87115f35313e\") " Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.261917 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3515ffdd-13b1-4957-b2c3-87115f35313e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3515ffdd-13b1-4957-b2c3-87115f35313e" (UID: "3515ffdd-13b1-4957-b2c3-87115f35313e"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.272348 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3515ffdd-13b1-4957-b2c3-87115f35313e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3515ffdd-13b1-4957-b2c3-87115f35313e" (UID: "3515ffdd-13b1-4957-b2c3-87115f35313e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.297031 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3515ffdd-13b1-4957-b2c3-87115f35313e","Type":"ContainerDied","Data":"2ea612e05074ab9bebe84d07bcd3f9b8e827799eb83ff767216bd779bf388803"} Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.297070 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ea612e05074ab9bebe84d07bcd3f9b8e827799eb83ff767216bd779bf388803" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.297119 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.363746 4818 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3515ffdd-13b1-4957-b2c3-87115f35313e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:05 crc kubenswrapper[4818]: I0930 17:02:05.363795 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3515ffdd-13b1-4957-b2c3-87115f35313e-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:06 crc kubenswrapper[4818]: I0930 17:02:06.241120 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:02:06 crc kubenswrapper[4818]: I0930 17:02:06.244474 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-gnmfp" Sep 30 17:02:12 crc kubenswrapper[4818]: I0930 17:02:12.430225 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 30 17:02:14 crc kubenswrapper[4818]: I0930 17:02:14.615412 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:02:14 crc kubenswrapper[4818]: I0930 17:02:14.963464 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7r27t" Sep 30 17:02:14 crc kubenswrapper[4818]: I0930 17:02:14.979797 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:02:14 crc kubenswrapper[4818]: I0930 17:02:14.987751 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:02:15 crc kubenswrapper[4818]: E0930 17:02:15.042192 4818 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Sep 30 17:02:15 crc kubenswrapper[4818]: E0930 17:02:15.042431 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tkg55,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-28tlv_openshift-marketplace(79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 30 17:02:15 crc kubenswrapper[4818]: E0930 17:02:15.045579 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-28tlv" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" Sep 30 17:02:15 crc kubenswrapper[4818]: E0930 17:02:15.823741 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-28tlv" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" Sep 30 17:02:20 crc kubenswrapper[4818]: E0930 17:02:20.946711 4818 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Sep 30 17:02:20 crc kubenswrapper[4818]: E0930 17:02:20.947309 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5f5fp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-mmw5q_openshift-marketplace(ea56cc9d-63e6-4e68-8f01-d93c321c6854): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 30 17:02:20 crc kubenswrapper[4818]: E0930 17:02:20.948886 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-mmw5q" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" Sep 30 17:02:20 crc kubenswrapper[4818]: E0930 17:02:20.990750 4818 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Sep 30 17:02:20 crc kubenswrapper[4818]: E0930 17:02:20.991429 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jcl4q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-44qds_openshift-marketplace(a59d1ab7-3891-4385-9f6e-8ca1a0bdf204): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 30 17:02:20 crc kubenswrapper[4818]: E0930 17:02:20.993768 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-44qds" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" Sep 30 17:02:21 crc kubenswrapper[4818]: I0930 17:02:21.285572 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4p4hg"] Sep 30 17:02:22 crc kubenswrapper[4818]: E0930 17:02:22.564628 4818 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Sep 30 17:02:22 crc kubenswrapper[4818]: E0930 17:02:22.564782 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wzxmj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-qv9v9_openshift-marketplace(6e171b72-1e48-4bf2-8c9d-5a2ce38f4588): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 30 17:02:22 crc kubenswrapper[4818]: E0930 17:02:22.565962 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-qv9v9" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" Sep 30 17:02:22 crc kubenswrapper[4818]: I0930 17:02:22.596341 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:02:22 crc kubenswrapper[4818]: I0930 17:02:22.596404 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:02:24 crc kubenswrapper[4818]: E0930 17:02:24.924654 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-qv9v9" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" Sep 30 17:02:24 crc kubenswrapper[4818]: E0930 17:02:24.924883 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-mmw5q" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" Sep 30 17:02:24 crc kubenswrapper[4818]: E0930 
17:02:24.924749 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-44qds" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" Sep 30 17:02:27 crc kubenswrapper[4818]: I0930 17:02:27.194963 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9g8j4" Sep 30 17:02:27 crc kubenswrapper[4818]: I0930 17:02:27.420914 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjn54" event={"ID":"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6","Type":"ContainerStarted","Data":"def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc"} Sep 30 17:02:27 crc kubenswrapper[4818]: I0930 17:02:27.422881 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" event={"ID":"3712d08f-58c2-4fff-9d9f-443ba37fc9c0","Type":"ContainerStarted","Data":"a01784e56da058035db7331790bae266fc6eaa576f0215e9fe4e64e97832a4f4"} Sep 30 17:02:27 crc kubenswrapper[4818]: I0930 17:02:27.422961 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" event={"ID":"3712d08f-58c2-4fff-9d9f-443ba37fc9c0","Type":"ContainerStarted","Data":"327de0129786537619d93a48f9d30a484316d4d194ecc0d7850dd407e5d27aee"} Sep 30 17:02:27 crc kubenswrapper[4818]: I0930 17:02:27.427604 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48kp2" event={"ID":"0e764b4c-fdff-4e32-bdec-17a1d80acb31","Type":"ContainerStarted","Data":"e9b089ba291391a238d8952a5a6b6b0588d42803673e3d3f9d6ecfc458cdd10f"} Sep 30 17:02:27 crc kubenswrapper[4818]: I0930 17:02:27.429557 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerStarted","Data":"6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3"} Sep 30 17:02:27 crc kubenswrapper[4818]: I0930 17:02:27.433422 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerStarted","Data":"acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87"} Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.447587 4818 generic.go:334] "Generic (PLEG): container finished" podID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerID="e9b089ba291391a238d8952a5a6b6b0588d42803673e3d3f9d6ecfc458cdd10f" exitCode=0 Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.447646 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48kp2" event={"ID":"0e764b4c-fdff-4e32-bdec-17a1d80acb31","Type":"ContainerDied","Data":"e9b089ba291391a238d8952a5a6b6b0588d42803673e3d3f9d6ecfc458cdd10f"} Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.456989 4818 generic.go:334] "Generic (PLEG): container finished" podID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerID="09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104" exitCode=0 Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.457158 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28tlv" 
event={"ID":"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e","Type":"ContainerDied","Data":"09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104"} Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.460578 4818 generic.go:334] "Generic (PLEG): container finished" podID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerID="def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc" exitCode=0 Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.460676 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjn54" event={"ID":"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6","Type":"ContainerDied","Data":"def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc"} Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.464714 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4p4hg" event={"ID":"3712d08f-58c2-4fff-9d9f-443ba37fc9c0","Type":"ContainerStarted","Data":"b188524a442b2ce612079a62f3c70e07cb709d322bd8ac7b5e2fd7ba3b08e6ed"} Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.467435 4818 generic.go:334] "Generic (PLEG): container finished" podID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerID="6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3" exitCode=0 Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.467523 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerDied","Data":"6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3"} Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.477426 4818 generic.go:334] "Generic (PLEG): container finished" podID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerID="acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87" exitCode=0 Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.477464 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerDied","Data":"acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87"} Sep 30 17:02:28 crc kubenswrapper[4818]: I0930 17:02:28.532284 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-4p4hg" podStartSLOduration=169.532265712 podStartE2EDuration="2m49.532265712s" podCreationTimestamp="2025-09-30 16:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:02:28.53009778 +0000 UTC m=+195.284369626" watchObservedRunningTime="2025-09-30 17:02:28.532265712 +0000 UTC m=+195.286537548" Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.484958 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28tlv" event={"ID":"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e","Type":"ContainerStarted","Data":"bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69"} Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.489260 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerStarted","Data":"324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d"} Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.492624 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-48kp2" event={"ID":"0e764b4c-fdff-4e32-bdec-17a1d80acb31","Type":"ContainerStarted","Data":"ce496550a7d1742f8c50085b97fe413ed8ddf6fe5d9cdc9d855ed09c18666f56"} Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.498626 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerStarted","Data":"324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06"} Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.507107 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-28tlv" podStartSLOduration=2.547465795 podStartE2EDuration="36.507089313s" podCreationTimestamp="2025-09-30 17:01:53 +0000 UTC" firstStartedPulling="2025-09-30 17:01:55.078094931 +0000 UTC m=+161.832366757" lastFinishedPulling="2025-09-30 17:02:29.037718459 +0000 UTC m=+195.791990275" observedRunningTime="2025-09-30 17:02:29.504628653 +0000 UTC m=+196.258900479" watchObservedRunningTime="2025-09-30 17:02:29.507089313 +0000 UTC m=+196.261361129" Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.524591 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d4v8q" podStartSLOduration=1.721568408 podStartE2EDuration="34.524571809s" podCreationTimestamp="2025-09-30 17:01:55 +0000 UTC" firstStartedPulling="2025-09-30 17:01:56.138056046 +0000 UTC m=+162.892327852" lastFinishedPulling="2025-09-30 17:02:28.941059407 +0000 UTC m=+195.695331253" observedRunningTime="2025-09-30 17:02:29.523413486 +0000 UTC m=+196.277685312" watchObservedRunningTime="2025-09-30 17:02:29.524571809 +0000 UTC m=+196.278843625" Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.543109 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xgbw4" podStartSLOduration=7.97280285 podStartE2EDuration="33.543092374s" podCreationTimestamp="2025-09-30 17:01:56 +0000 UTC" firstStartedPulling="2025-09-30 17:02:03.411614412 +0000 UTC m=+170.165886228" lastFinishedPulling="2025-09-30 17:02:28.981903916 +0000 UTC m=+195.736175752" observedRunningTime="2025-09-30 17:02:29.542558819 +0000 UTC m=+196.296830635" watchObservedRunningTime="2025-09-30 17:02:29.543092374 +0000 UTC m=+196.297364190" Sep 30 17:02:29 crc kubenswrapper[4818]: I0930 17:02:29.562402 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-48kp2" podStartSLOduration=3.757494438 podStartE2EDuration="34.562385991s" podCreationTimestamp="2025-09-30 17:01:55 +0000 UTC" firstStartedPulling="2025-09-30 17:01:58.226593189 +0000 UTC m=+164.980865005" lastFinishedPulling="2025-09-30 17:02:29.031484712 +0000 UTC m=+195.785756558" observedRunningTime="2025-09-30 17:02:29.559383516 +0000 UTC m=+196.313655332" watchObservedRunningTime="2025-09-30 17:02:29.562385991 +0000 UTC m=+196.316657807" Sep 30 17:02:30 crc kubenswrapper[4818]: I0930 17:02:30.505800 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjn54" event={"ID":"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6","Type":"ContainerStarted","Data":"9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331"} Sep 30 17:02:30 crc kubenswrapper[4818]: I0930 17:02:30.530457 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-pjn54" podStartSLOduration=8.508714914 podStartE2EDuration="34.530428113s" podCreationTimestamp="2025-09-30 17:01:56 +0000 UTC" firstStartedPulling="2025-09-30 17:02:03.408897005 +0000 UTC m=+170.163168831" lastFinishedPulling="2025-09-30 17:02:29.430610214 +0000 UTC m=+196.184882030" observedRunningTime="2025-09-30 17:02:30.529311141 +0000 UTC m=+197.283582967" watchObservedRunningTime="2025-09-30 17:02:30.530428113 +0000 UTC m=+197.284699959" Sep 30 17:02:33 crc kubenswrapper[4818]: I0930 17:02:33.570084 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:02:33 crc kubenswrapper[4818]: I0930 17:02:33.570373 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:02:33 crc kubenswrapper[4818]: I0930 17:02:33.846807 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:02:34 crc kubenswrapper[4818]: I0930 17:02:34.571881 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:02:35 crc kubenswrapper[4818]: I0930 17:02:35.754992 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d4v8q" Sep 30 17:02:35 crc kubenswrapper[4818]: I0930 17:02:35.755457 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d4v8q" Sep 30 17:02:35 crc kubenswrapper[4818]: I0930 17:02:35.825428 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d4v8q" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.188952 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.189004 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.252845 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.596329 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.600519 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d4v8q" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.769454 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.770716 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:02:36 crc kubenswrapper[4818]: I0930 17:02:36.835050 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:02:37 crc kubenswrapper[4818]: I0930 17:02:37.212158 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:02:37 crc kubenswrapper[4818]: 
I0930 17:02:37.212236 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:02:37 crc kubenswrapper[4818]: I0930 17:02:37.270683 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:02:37 crc kubenswrapper[4818]: I0930 17:02:37.590007 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:02:37 crc kubenswrapper[4818]: I0930 17:02:37.605123 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:02:38 crc kubenswrapper[4818]: I0930 17:02:38.221068 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-48kp2"] Sep 30 17:02:38 crc kubenswrapper[4818]: I0930 17:02:38.556642 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-44qds" event={"ID":"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204","Type":"ContainerStarted","Data":"0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb"} Sep 30 17:02:38 crc kubenswrapper[4818]: I0930 17:02:38.558989 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerID="baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2" exitCode=0 Sep 30 17:02:38 crc kubenswrapper[4818]: I0930 17:02:38.559060 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qv9v9" event={"ID":"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588","Type":"ContainerDied","Data":"baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2"} Sep 30 17:02:38 crc kubenswrapper[4818]: I0930 17:02:38.568712 4818 generic.go:334] "Generic (PLEG): container finished" podID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerID="7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f" exitCode=0 Sep 30 17:02:38 crc kubenswrapper[4818]: I0930 17:02:38.568760 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmw5q" event={"ID":"ea56cc9d-63e6-4e68-8f01-d93c321c6854","Type":"ContainerDied","Data":"7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f"} Sep 30 17:02:38 crc kubenswrapper[4818]: I0930 17:02:38.569137 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-48kp2" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="registry-server" containerID="cri-o://ce496550a7d1742f8c50085b97fe413ed8ddf6fe5d9cdc9d855ed09c18666f56" gracePeriod=2 Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.576869 4818 generic.go:334] "Generic (PLEG): container finished" podID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerID="0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb" exitCode=0 Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.577010 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-44qds" event={"ID":"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204","Type":"ContainerDied","Data":"0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb"} Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.582142 4818 generic.go:334] "Generic (PLEG): container finished" podID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerID="ce496550a7d1742f8c50085b97fe413ed8ddf6fe5d9cdc9d855ed09c18666f56" 
exitCode=0 Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.582209 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48kp2" event={"ID":"0e764b4c-fdff-4e32-bdec-17a1d80acb31","Type":"ContainerDied","Data":"ce496550a7d1742f8c50085b97fe413ed8ddf6fe5d9cdc9d855ed09c18666f56"} Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.787996 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.839391 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrls2\" (UniqueName: \"kubernetes.io/projected/0e764b4c-fdff-4e32-bdec-17a1d80acb31-kube-api-access-jrls2\") pod \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.839427 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-catalog-content\") pod \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.839455 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-utilities\") pod \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\" (UID: \"0e764b4c-fdff-4e32-bdec-17a1d80acb31\") " Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.840297 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-utilities" (OuterVolumeSpecName: "utilities") pod "0e764b4c-fdff-4e32-bdec-17a1d80acb31" (UID: "0e764b4c-fdff-4e32-bdec-17a1d80acb31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.845627 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e764b4c-fdff-4e32-bdec-17a1d80acb31-kube-api-access-jrls2" (OuterVolumeSpecName: "kube-api-access-jrls2") pod "0e764b4c-fdff-4e32-bdec-17a1d80acb31" (UID: "0e764b4c-fdff-4e32-bdec-17a1d80acb31"). InnerVolumeSpecName "kube-api-access-jrls2". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.870878 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e764b4c-fdff-4e32-bdec-17a1d80acb31" (UID: "0e764b4c-fdff-4e32-bdec-17a1d80acb31"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.941115 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrls2\" (UniqueName: \"kubernetes.io/projected/0e764b4c-fdff-4e32-bdec-17a1d80acb31-kube-api-access-jrls2\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.941158 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:39 crc kubenswrapper[4818]: I0930 17:02:39.941170 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e764b4c-fdff-4e32-bdec-17a1d80acb31-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.590210 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48kp2" Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.590162 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48kp2" event={"ID":"0e764b4c-fdff-4e32-bdec-17a1d80acb31","Type":"ContainerDied","Data":"4d111d8b825ddfa164b86d4341596dd1949accfa9f8426033ac587fb34618346"} Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.590634 4818 scope.go:117] "RemoveContainer" containerID="ce496550a7d1742f8c50085b97fe413ed8ddf6fe5d9cdc9d855ed09c18666f56" Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.595383 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qv9v9" event={"ID":"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588","Type":"ContainerStarted","Data":"f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1"} Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.599397 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmw5q" event={"ID":"ea56cc9d-63e6-4e68-8f01-d93c321c6854","Type":"ContainerStarted","Data":"acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5"} Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.607016 4818 scope.go:117] "RemoveContainer" containerID="e9b089ba291391a238d8952a5a6b6b0588d42803673e3d3f9d6ecfc458cdd10f" Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.621849 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xgbw4"] Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.622084 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xgbw4" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="registry-server" containerID="cri-o://324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06" gracePeriod=2 Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.624180 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qv9v9" podStartSLOduration=3.6252389799999998 podStartE2EDuration="47.624162249s" podCreationTimestamp="2025-09-30 17:01:53 +0000 UTC" firstStartedPulling="2025-09-30 17:01:55.060739979 +0000 UTC m=+161.815011805" lastFinishedPulling="2025-09-30 17:02:39.059663248 +0000 UTC m=+205.813935074" observedRunningTime="2025-09-30 17:02:40.620981258 +0000 UTC m=+207.375253074" watchObservedRunningTime="2025-09-30 17:02:40.624162249 +0000 UTC 
m=+207.378434065" Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.631780 4818 scope.go:117] "RemoveContainer" containerID="48772190a0dddac2d0676c673f4ca962e48c16e9e4a53d9c908fd48d006d6bfe" Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.638678 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-48kp2"] Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.645728 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-48kp2"] Sep 30 17:02:40 crc kubenswrapper[4818]: I0930 17:02:40.666485 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mmw5q" podStartSLOduration=3.372710097 podStartE2EDuration="47.666467666s" podCreationTimestamp="2025-09-30 17:01:53 +0000 UTC" firstStartedPulling="2025-09-30 17:01:55.117464808 +0000 UTC m=+161.871736624" lastFinishedPulling="2025-09-30 17:02:39.411222347 +0000 UTC m=+206.165494193" observedRunningTime="2025-09-30 17:02:40.665167879 +0000 UTC m=+207.419439695" watchObservedRunningTime="2025-09-30 17:02:40.666467666 +0000 UTC m=+207.420739502" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.049873 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" path="/var/lib/kubelet/pods/0e764b4c-fdff-4e32-bdec-17a1d80acb31/volumes" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.460368 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.572582 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjhp9\" (UniqueName: \"kubernetes.io/projected/0a994ace-967b-4a52-9289-6b59bb9b699b-kube-api-access-fjhp9\") pod \"0a994ace-967b-4a52-9289-6b59bb9b699b\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.572720 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-catalog-content\") pod \"0a994ace-967b-4a52-9289-6b59bb9b699b\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.572764 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-utilities\") pod \"0a994ace-967b-4a52-9289-6b59bb9b699b\" (UID: \"0a994ace-967b-4a52-9289-6b59bb9b699b\") " Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.573952 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-utilities" (OuterVolumeSpecName: "utilities") pod "0a994ace-967b-4a52-9289-6b59bb9b699b" (UID: "0a994ace-967b-4a52-9289-6b59bb9b699b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.583188 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a994ace-967b-4a52-9289-6b59bb9b699b-kube-api-access-fjhp9" (OuterVolumeSpecName: "kube-api-access-fjhp9") pod "0a994ace-967b-4a52-9289-6b59bb9b699b" (UID: "0a994ace-967b-4a52-9289-6b59bb9b699b"). InnerVolumeSpecName "kube-api-access-fjhp9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.615730 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-44qds" event={"ID":"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204","Type":"ContainerStarted","Data":"607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be"} Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.618348 4818 generic.go:334] "Generic (PLEG): container finished" podID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerID="324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06" exitCode=0 Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.618412 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerDied","Data":"324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06"} Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.618435 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgbw4" event={"ID":"0a994ace-967b-4a52-9289-6b59bb9b699b","Type":"ContainerDied","Data":"fb4219c6b4405a0cd96d348e239a0072162472e1115477f4843f14c6bd1a8731"} Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.618455 4818 scope.go:117] "RemoveContainer" containerID="324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.618726 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xgbw4" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.644623 4818 scope.go:117] "RemoveContainer" containerID="6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.647073 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-44qds" podStartSLOduration=2.8917567699999998 podStartE2EDuration="49.647054006s" podCreationTimestamp="2025-09-30 17:01:53 +0000 UTC" firstStartedPulling="2025-09-30 17:01:55.030938564 +0000 UTC m=+161.785210390" lastFinishedPulling="2025-09-30 17:02:41.78623581 +0000 UTC m=+208.540507626" observedRunningTime="2025-09-30 17:02:42.640024396 +0000 UTC m=+209.394296222" watchObservedRunningTime="2025-09-30 17:02:42.647054006 +0000 UTC m=+209.401325832" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.665351 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a994ace-967b-4a52-9289-6b59bb9b699b" (UID: "0a994ace-967b-4a52-9289-6b59bb9b699b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.672761 4818 scope.go:117] "RemoveContainer" containerID="a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.674524 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjhp9\" (UniqueName: \"kubernetes.io/projected/0a994ace-967b-4a52-9289-6b59bb9b699b-kube-api-access-fjhp9\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.674547 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.674559 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a994ace-967b-4a52-9289-6b59bb9b699b-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.699014 4818 scope.go:117] "RemoveContainer" containerID="324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06" Sep 30 17:02:42 crc kubenswrapper[4818]: E0930 17:02:42.699644 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06\": container with ID starting with 324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06 not found: ID does not exist" containerID="324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.699704 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06"} err="failed to get container status \"324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06\": rpc error: code = NotFound desc = could not find container \"324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06\": container with ID starting with 324f80fcf1b541ba8cbb09721fc956b303b44f328d2595fa07a4c6298e4eeb06 not found: ID does not exist" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.699768 4818 scope.go:117] "RemoveContainer" containerID="6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3" Sep 30 17:02:42 crc kubenswrapper[4818]: E0930 17:02:42.700318 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3\": container with ID starting with 6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3 not found: ID does not exist" containerID="6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3" Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.700432 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3"} err="failed to get container status \"6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3\": rpc error: code = NotFound desc = could not find container \"6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3\": container with ID starting with 6f33d90e0211f988de2b2ee67b7d4438a470c8b206c58e9183c34102309fcdc3 not found: ID does not exist" Sep 30 17:02:42 crc 
kubenswrapper[4818]: I0930 17:02:42.700542 4818 scope.go:117] "RemoveContainer" containerID="a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb"
Sep 30 17:02:42 crc kubenswrapper[4818]: E0930 17:02:42.701202 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb\": container with ID starting with a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb not found: ID does not exist" containerID="a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb"
Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.701252 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb"} err="failed to get container status \"a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb\": rpc error: code = NotFound desc = could not find container \"a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb\": container with ID starting with a89dd9121147ba99a0a53182610c0dae475d75a9b2a2cf7d11073d69316237fb not found: ID does not exist"
Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.957302 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xgbw4"]
Sep 30 17:02:42 crc kubenswrapper[4818]: I0930 17:02:42.961297 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xgbw4"]
Sep 30 17:02:43 crc kubenswrapper[4818]: I0930 17:02:43.782351 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-44qds"
Sep 30 17:02:43 crc kubenswrapper[4818]: I0930 17:02:43.782668 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-44qds"
Sep 30 17:02:43 crc kubenswrapper[4818]: I0930 17:02:43.828027 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-44qds"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.000946 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mmw5q"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.001009 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mmw5q"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.031572 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" path="/var/lib/kubelet/pods/0a994ace-967b-4a52-9289-6b59bb9b699b/volumes"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.069622 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mmw5q"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.173653 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qv9v9"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.173790 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qv9v9"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.221716 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qv9v9"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.680209 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qv9v9"
Sep 30 17:02:44 crc kubenswrapper[4818]: I0930 17:02:44.692121 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mmw5q"
Sep 30 17:02:45 crc kubenswrapper[4818]: I0930 17:02:45.229520 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qv9v9"]
Sep 30 17:02:46 crc kubenswrapper[4818]: I0930 17:02:46.645863 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qv9v9" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="registry-server" containerID="cri-o://f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1" gracePeriod=2
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.057241 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qv9v9"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.237686 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-utilities\") pod \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") "
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.238012 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzxmj\" (UniqueName: \"kubernetes.io/projected/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-kube-api-access-wzxmj\") pod \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") "
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.238046 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-catalog-content\") pod \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\" (UID: \"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588\") "
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.239084 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-utilities" (OuterVolumeSpecName: "utilities") pod "6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" (UID: "6e171b72-1e48-4bf2-8c9d-5a2ce38f4588"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.246425 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-kube-api-access-wzxmj" (OuterVolumeSpecName: "kube-api-access-wzxmj") pod "6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" (UID: "6e171b72-1e48-4bf2-8c9d-5a2ce38f4588"). InnerVolumeSpecName "kube-api-access-wzxmj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.299260 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" (UID: "6e171b72-1e48-4bf2-8c9d-5a2ce38f4588"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.338810 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzxmj\" (UniqueName: \"kubernetes.io/projected/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-kube-api-access-wzxmj\") on node \"crc\" DevicePath \"\""
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.338848 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.338861 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588-utilities\") on node \"crc\" DevicePath \"\""
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.625256 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmw5q"]
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.625715 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mmw5q" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="registry-server" containerID="cri-o://acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5" gracePeriod=2
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.653415 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerID="f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1" exitCode=0
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.653465 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qv9v9" event={"ID":"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588","Type":"ContainerDied","Data":"f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1"}
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.653485 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qv9v9"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.653509 4818 scope.go:117] "RemoveContainer" containerID="f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.653495 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qv9v9" event={"ID":"6e171b72-1e48-4bf2-8c9d-5a2ce38f4588","Type":"ContainerDied","Data":"9137c438098995b0efd229acb91bd8670b6974a5eb4042b71056344d924973ad"}
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.684086 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qv9v9"]
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.689491 4818 scope.go:117] "RemoveContainer" containerID="baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.691012 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qv9v9"]
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.749558 4818 scope.go:117] "RemoveContainer" containerID="c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.796049 4818 scope.go:117] "RemoveContainer" containerID="f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1"
Sep 30 17:02:47 crc kubenswrapper[4818]: E0930 17:02:47.797212 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1\": container with ID starting with f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1 not found: ID does not exist" containerID="f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.797387 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1"} err="failed to get container status \"f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1\": rpc error: code = NotFound desc = could not find container \"f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1\": container with ID starting with f477bdc4fc4bed7e065bfd45b879a22bbfec5955d391b76c8d21baf5030677b1 not found: ID does not exist"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.797435 4818 scope.go:117] "RemoveContainer" containerID="baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2"
Sep 30 17:02:47 crc kubenswrapper[4818]: E0930 17:02:47.797834 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2\": container with ID starting with baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2 not found: ID does not exist" containerID="baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.797887 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2"} err="failed to get container status \"baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2\": rpc error: code = NotFound desc = could not find container \"baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2\": container with ID starting with baf08bcdfca481d63c02fcdbcc84a137715310c3e4e3dc538c2a993d02028cf2 not found: ID does not exist"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.797918 4818 scope.go:117] "RemoveContainer" containerID="c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea"
Sep 30 17:02:47 crc kubenswrapper[4818]: E0930 17:02:47.798701 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea\": container with ID starting with c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea not found: ID does not exist" containerID="c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea"
Sep 30 17:02:47 crc kubenswrapper[4818]: I0930 17:02:47.798743 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea"} err="failed to get container status \"c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea\": rpc error: code = NotFound desc = could not find container \"c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea\": container with ID starting with c4f6a2a2667cccdace508263c1f96895ca02b0e53561b160ea9338041e9e90ea not found: ID does not exist"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.034385 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" path="/var/lib/kubelet/pods/6e171b72-1e48-4bf2-8c9d-5a2ce38f4588/volumes"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.064639 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmw5q"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.148290 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-utilities\") pod \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") "
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.149763 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-utilities" (OuterVolumeSpecName: "utilities") pod "ea56cc9d-63e6-4e68-8f01-d93c321c6854" (UID: "ea56cc9d-63e6-4e68-8f01-d93c321c6854"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.249025 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-catalog-content\") pod \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") "
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.249093 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f5fp\" (UniqueName: \"kubernetes.io/projected/ea56cc9d-63e6-4e68-8f01-d93c321c6854-kube-api-access-5f5fp\") pod \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\" (UID: \"ea56cc9d-63e6-4e68-8f01-d93c321c6854\") "
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.249392 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-utilities\") on node \"crc\" DevicePath \"\""
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.256802 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea56cc9d-63e6-4e68-8f01-d93c321c6854-kube-api-access-5f5fp" (OuterVolumeSpecName: "kube-api-access-5f5fp") pod "ea56cc9d-63e6-4e68-8f01-d93c321c6854" (UID: "ea56cc9d-63e6-4e68-8f01-d93c321c6854"). InnerVolumeSpecName "kube-api-access-5f5fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.301420 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea56cc9d-63e6-4e68-8f01-d93c321c6854" (UID: "ea56cc9d-63e6-4e68-8f01-d93c321c6854"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.353226 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea56cc9d-63e6-4e68-8f01-d93c321c6854-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.353292 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f5fp\" (UniqueName: \"kubernetes.io/projected/ea56cc9d-63e6-4e68-8f01-d93c321c6854-kube-api-access-5f5fp\") on node \"crc\" DevicePath \"\""
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.664569 4818 generic.go:334] "Generic (PLEG): container finished" podID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerID="acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5" exitCode=0
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.664625 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmw5q" event={"ID":"ea56cc9d-63e6-4e68-8f01-d93c321c6854","Type":"ContainerDied","Data":"acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5"}
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.665044 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmw5q" event={"ID":"ea56cc9d-63e6-4e68-8f01-d93c321c6854","Type":"ContainerDied","Data":"6a13bc0b88cbbb959abe4ffeabaf1aff2e43078126fa8ee835013cc8afebfc0d"}
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.665075 4818 scope.go:117] "RemoveContainer" containerID="acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.664701 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmw5q"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.692638 4818 scope.go:117] "RemoveContainer" containerID="7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.708894 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmw5q"]
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.713786 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mmw5q"]
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.735823 4818 scope.go:117] "RemoveContainer" containerID="fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.753762 4818 scope.go:117] "RemoveContainer" containerID="acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5"
Sep 30 17:02:48 crc kubenswrapper[4818]: E0930 17:02:48.754336 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5\": container with ID starting with acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5 not found: ID does not exist" containerID="acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.754423 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5"} err="failed to get container status \"acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5\": rpc error: code = NotFound desc = could not find container \"acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5\": container with ID starting with acb2f324546b15c64e9de40741b38d62bab8b6dda9e7cfe6d30df82c2c4a7be5 not found: ID does not exist"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.754474 4818 scope.go:117] "RemoveContainer" containerID="7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f"
Sep 30 17:02:48 crc kubenswrapper[4818]: E0930 17:02:48.755063 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f\": container with ID starting with 7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f not found: ID does not exist" containerID="7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.755121 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f"} err="failed to get container status \"7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f\": rpc error: code = NotFound desc = could not find container \"7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f\": container with ID starting with 7d228db9e37a19c9874d2cb0006f1d03098ddd577a8a342fc1bf1c7528f5e96f not found: ID does not exist"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.755157 4818 scope.go:117] "RemoveContainer" containerID="fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8"
Sep 30 17:02:48 crc kubenswrapper[4818]: E0930 17:02:48.755540 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8\": container with ID starting with fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8 not found: ID does not exist" containerID="fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8"
Sep 30 17:02:48 crc kubenswrapper[4818]: I0930 17:02:48.755584 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8"} err="failed to get container status \"fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8\": rpc error: code = NotFound desc = could not find container \"fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8\": container with ID starting with fff003e0a3f0a09cc3cf0ab11f1548262811ab8ef41ccef8a6fe4936011bfbc8 not found: ID does not exist"
Sep 30 17:02:50 crc kubenswrapper[4818]: I0930 17:02:50.030587 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" path="/var/lib/kubelet/pods/ea56cc9d-63e6-4e68-8f01-d93c321c6854/volumes"
Sep 30 17:02:50 crc kubenswrapper[4818]: I0930 17:02:50.451441 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fbfxd"]
Sep 30 17:02:52 crc kubenswrapper[4818]: I0930 17:02:52.596398 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:02:52 crc kubenswrapper[4818]: I0930 17:02:52.596727 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:02:52 crc kubenswrapper[4818]: I0930 17:02:52.596785 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss"
Sep 30 17:02:52 crc kubenswrapper[4818]: I0930 17:02:52.597399 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Sep 30 17:02:52 crc kubenswrapper[4818]: I0930 17:02:52.597459 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b" gracePeriod=600
Sep 30 17:02:53 crc kubenswrapper[4818]: I0930 17:02:53.692179 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b" exitCode=0
Sep 30 17:02:53 crc kubenswrapper[4818]: I0930 17:02:53.692239 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b"}
Sep 30 17:02:53 crc kubenswrapper[4818]: I0930 17:02:53.692501 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"f602cf125807b205ed113b667482f15568000c1fc70e3c7e75eecadfe6a02087"}
Sep 30 17:02:53 crc kubenswrapper[4818]: I0930 17:02:53.843869 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-44qds"
Sep 30 17:03:15 crc kubenswrapper[4818]: I0930 17:03:15.479911 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" podUID="94a6a5c0-845a-4f60-b111-eb28393fb07c" containerName="oauth-openshift" containerID="cri-o://c9788da5387ee11f7426151fb32c495398273fd27d7b7d039e54f3e7cba93c24" gracePeriod=15
Sep 30 17:03:15 crc kubenswrapper[4818]: I0930 17:03:15.814522 4818 generic.go:334] "Generic (PLEG): container finished" podID="94a6a5c0-845a-4f60-b111-eb28393fb07c" containerID="c9788da5387ee11f7426151fb32c495398273fd27d7b7d039e54f3e7cba93c24" exitCode=0
Sep 30 17:03:15 crc kubenswrapper[4818]: I0930 17:03:15.814804 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" event={"ID":"94a6a5c0-845a-4f60-b111-eb28393fb07c","Type":"ContainerDied","Data":"c9788da5387ee11f7426151fb32c495398273fd27d7b7d039e54f3e7cba93c24"}
Sep 30 17:03:15 crc kubenswrapper[4818]: I0930 17:03:15.974800 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040558 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7c89776f78-vf8wf"]
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040839 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040857 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040870 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94a6a5c0-845a-4f60-b111-eb28393fb07c" containerName="oauth-openshift"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040877 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="94a6a5c0-845a-4f60-b111-eb28393fb07c" containerName="oauth-openshift"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040886 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040892 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040900 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040906 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040914 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040941 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040955 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040962 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040973 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040981 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.040988 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.040993 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041005 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041012 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041023 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="828db2f6-cb32-4e0c-85df-164eddd8e1ed" containerName="pruner"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041031 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="828db2f6-cb32-4e0c-85df-164eddd8e1ed" containerName="pruner"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041043 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041053 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041068 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3515ffdd-13b1-4957-b2c3-87115f35313e" containerName="pruner"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041075 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3515ffdd-13b1-4957-b2c3-87115f35313e" containerName="pruner"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041090 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041099 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041109 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041117 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="extract-utilities"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041126 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ac1391-f116-4d7b-88dd-d694671283dc" containerName="collect-profiles"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041134 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ac1391-f116-4d7b-88dd-d694671283dc" containerName="collect-profiles"
Sep 30 17:03:16 crc kubenswrapper[4818]: E0930 17:03:16.041149 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041157 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="extract-content"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041263 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3515ffdd-13b1-4957-b2c3-87115f35313e" containerName="pruner"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041280 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="828db2f6-cb32-4e0c-85df-164eddd8e1ed" containerName="pruner"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041293 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="41ac1391-f116-4d7b-88dd-d694671283dc" containerName="collect-profiles"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041305 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="94a6a5c0-845a-4f60-b111-eb28393fb07c" containerName="oauth-openshift"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041315 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e171b72-1e48-4bf2-8c9d-5a2ce38f4588" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041324 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea56cc9d-63e6-4e68-8f01-d93c321c6854" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041334 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e764b4c-fdff-4e32-bdec-17a1d80acb31" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041344 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a994ace-967b-4a52-9289-6b59bb9b699b" containerName="registry-server"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041745 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7c89776f78-vf8wf"]
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.041830 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.125594 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-session\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.126057 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-serving-cert\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.126284 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-policies\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.126486 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-trusted-ca-bundle\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.126662 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-service-ca\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.126991 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-ocp-branding-template\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.127175 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-idp-0-file-data\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.127387 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-dir\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.127557 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-cliconfig\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.127722 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-error\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.127893 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-login\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.128158 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-router-certs\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.128389 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-provider-selection\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.128637 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p28xv\" (UniqueName: \"kubernetes.io/projected/94a6a5c0-845a-4f60-b111-eb28393fb07c-kube-api-access-p28xv\") pod \"94a6a5c0-845a-4f60-b111-eb28393fb07c\" (UID: \"94a6a5c0-845a-4f60-b111-eb28393fb07c\") "
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.127423 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.128102 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.128669 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.129231 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.130387 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.131161 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.131206 4818 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-dir\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.131223 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.131236 4818 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-audit-policies\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.131251 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.134069 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.134358 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.134545 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.134737 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.135368 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.135449 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94a6a5c0-845a-4f60-b111-eb28393fb07c-kube-api-access-p28xv" (OuterVolumeSpecName: "kube-api-access-p28xv") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "kube-api-access-p28xv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.138646 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.140592 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.140831 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "94a6a5c0-845a-4f60-b111-eb28393fb07c" (UID: "94a6a5c0-845a-4f60-b111-eb28393fb07c"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.232583 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.232997 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-audit-policies\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.233375 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.233571 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74006a38-290d-4869-b85c-86b152d8bffa-audit-dir\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.233749 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-error\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.233911 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.234148 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.234316 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-session\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.234488 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.234675 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.234836 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qww7l\" (UniqueName: \"kubernetes.io/projected/74006a38-290d-4869-b85c-86b152d8bffa-kube-api-access-qww7l\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.235053 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.235258 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-login\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.235426 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.235627 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.235766 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.235888 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.236051 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.236174 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.236377 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.236435 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p28xv\" (UniqueName: \"kubernetes.io/projected/94a6a5c0-845a-4f60-b111-eb28393fb07c-kube-api-access-p28xv\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.236457 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.236478 4818 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a6a5c0-845a-4f60-b111-eb28393fb07c-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338078 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74006a38-290d-4869-b85c-86b152d8bffa-audit-dir\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338146 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-error\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338172 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338202 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338227 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-session\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338250 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338280 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338304 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qww7l\" (UniqueName: \"kubernetes.io/projected/74006a38-290d-4869-b85c-86b152d8bffa-kube-api-access-qww7l\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338327 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338355 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-login\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338383 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338419 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338451 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-audit-policies\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.338480 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.345570 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74006a38-290d-4869-b85c-86b152d8bffa-audit-dir\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.346655 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.346981 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-audit-policies\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.347365 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.347478 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.347951 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.351657 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.352576 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-session\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.352713 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-login\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.352988 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.353074 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.354519 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.354576 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/74006a38-290d-4869-b85c-86b152d8bffa-v4-0-config-user-template-error\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf"
Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.374883 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qww7l\" (UniqueName:
\"kubernetes.io/projected/74006a38-290d-4869-b85c-86b152d8bffa-kube-api-access-qww7l\") pod \"oauth-openshift-7c89776f78-vf8wf\" (UID: \"74006a38-290d-4869-b85c-86b152d8bffa\") " pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf" Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.658516 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf" Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.831220 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" event={"ID":"94a6a5c0-845a-4f60-b111-eb28393fb07c","Type":"ContainerDied","Data":"bc34f2d6ad05daed687d4edc072c9f9f93588e920c81891cd7b03bdf21bba187"} Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.831912 4818 scope.go:117] "RemoveContainer" containerID="c9788da5387ee11f7426151fb32c495398273fd27d7b7d039e54f3e7cba93c24" Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.831455 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fbfxd" Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.880982 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fbfxd"] Sep 30 17:03:16 crc kubenswrapper[4818]: I0930 17:03:16.892873 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fbfxd"] Sep 30 17:03:17 crc kubenswrapper[4818]: I0930 17:03:17.169641 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7c89776f78-vf8wf"] Sep 30 17:03:17 crc kubenswrapper[4818]: W0930 17:03:17.183578 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74006a38_290d_4869_b85c_86b152d8bffa.slice/crio-d714369d2c73738ba5fe18ebedc93042ebbada7632b377b3296b6a0a3a77eb89 WatchSource:0}: Error finding container d714369d2c73738ba5fe18ebedc93042ebbada7632b377b3296b6a0a3a77eb89: Status 404 returned error can't find the container with id d714369d2c73738ba5fe18ebedc93042ebbada7632b377b3296b6a0a3a77eb89 Sep 30 17:03:17 crc kubenswrapper[4818]: I0930 17:03:17.840177 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf" event={"ID":"74006a38-290d-4869-b85c-86b152d8bffa","Type":"ContainerStarted","Data":"146017a6c93df27bf58b512910be615d1af3ee64f2ff0c75cdcc63e955675db3"} Sep 30 17:03:17 crc kubenswrapper[4818]: I0930 17:03:17.841947 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf" Sep 30 17:03:17 crc kubenswrapper[4818]: I0930 17:03:17.841987 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf" event={"ID":"74006a38-290d-4869-b85c-86b152d8bffa","Type":"ContainerStarted","Data":"d714369d2c73738ba5fe18ebedc93042ebbada7632b377b3296b6a0a3a77eb89"} Sep 30 17:03:17 crc kubenswrapper[4818]: I0930 17:03:17.880745 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf" podStartSLOduration=27.880717299 podStartE2EDuration="27.880717299s" podCreationTimestamp="2025-09-30 17:02:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-09-30 17:03:17.870000103 +0000 UTC m=+244.624271959" watchObservedRunningTime="2025-09-30 17:03:17.880717299 +0000 UTC m=+244.634989155" Sep 30 17:03:18 crc kubenswrapper[4818]: I0930 17:03:18.032782 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94a6a5c0-845a-4f60-b111-eb28393fb07c" path="/var/lib/kubelet/pods/94a6a5c0-845a-4f60-b111-eb28393fb07c/volumes" Sep 30 17:03:18 crc kubenswrapper[4818]: I0930 17:03:18.342249 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7c89776f78-vf8wf" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.053840 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-44qds"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.054882 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-44qds" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="registry-server" containerID="cri-o://607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be" gracePeriod=30 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.069633 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-28tlv"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.070123 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-28tlv" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="registry-server" containerID="cri-o://bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69" gracePeriod=30 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.080288 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8gcpn"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.080641 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerName="marketplace-operator" containerID="cri-o://c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052" gracePeriod=30 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.087904 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d4v8q"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.088451 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d4v8q" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="registry-server" containerID="cri-o://324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d" gracePeriod=30 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.093861 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dhz7n"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.094725 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.095012 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjn54"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.095300 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pjn54" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="registry-server" containerID="cri-o://9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331" gracePeriod=30 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.103068 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dhz7n"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.244258 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnqxd\" (UniqueName: \"kubernetes.io/projected/9ca14f10-19ae-485b-b237-7a3e0c1c701a-kube-api-access-rnqxd\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.244300 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9ca14f10-19ae-485b-b237-7a3e0c1c701a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.244326 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9ca14f10-19ae-485b-b237-7a3e0c1c701a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.346559 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnqxd\" (UniqueName: \"kubernetes.io/projected/9ca14f10-19ae-485b-b237-7a3e0c1c701a-kube-api-access-rnqxd\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.346952 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9ca14f10-19ae-485b-b237-7a3e0c1c701a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.346982 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9ca14f10-19ae-485b-b237-7a3e0c1c701a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.349045 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9ca14f10-19ae-485b-b237-7a3e0c1c701a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.353567 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9ca14f10-19ae-485b-b237-7a3e0c1c701a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.368688 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnqxd\" (UniqueName: \"kubernetes.io/projected/9ca14f10-19ae-485b-b237-7a3e0c1c701a-kube-api-access-rnqxd\") pod \"marketplace-operator-79b997595-dhz7n\" (UID: \"9ca14f10-19ae-485b-b237-7a3e0c1c701a\") " pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.412489 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.490945 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.556551 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d4v8q" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.587798 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.590210 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.624969 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.651362 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcl4q\" (UniqueName: \"kubernetes.io/projected/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-kube-api-access-jcl4q\") pod \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.651529 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-catalog-content\") pod \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.651633 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8srpc\" (UniqueName: \"kubernetes.io/projected/f9458d30-6f34-4e65-94a3-dc5787773b24-kube-api-access-8srpc\") pod \"f9458d30-6f34-4e65-94a3-dc5787773b24\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.653657 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-utilities" (OuterVolumeSpecName: "utilities") pod "a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" (UID: "a59d1ab7-3891-4385-9f6e-8ca1a0bdf204"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.651901 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-utilities\") pod \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\" (UID: \"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.654603 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-catalog-content\") pod \"f9458d30-6f34-4e65-94a3-dc5787773b24\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.654637 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-utilities\") pod \"f9458d30-6f34-4e65-94a3-dc5787773b24\" (UID: \"f9458d30-6f34-4e65-94a3-dc5787773b24\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.655050 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.655653 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-utilities" (OuterVolumeSpecName: "utilities") pod "f9458d30-6f34-4e65-94a3-dc5787773b24" (UID: "f9458d30-6f34-4e65-94a3-dc5787773b24"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.656777 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-kube-api-access-jcl4q" (OuterVolumeSpecName: "kube-api-access-jcl4q") pod "a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" (UID: "a59d1ab7-3891-4385-9f6e-8ca1a0bdf204"). InnerVolumeSpecName "kube-api-access-jcl4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.657240 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9458d30-6f34-4e65-94a3-dc5787773b24-kube-api-access-8srpc" (OuterVolumeSpecName: "kube-api-access-8srpc") pod "f9458d30-6f34-4e65-94a3-dc5787773b24" (UID: "f9458d30-6f34-4e65-94a3-dc5787773b24"). InnerVolumeSpecName "kube-api-access-8srpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.676258 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9458d30-6f34-4e65-94a3-dc5787773b24" (UID: "f9458d30-6f34-4e65-94a3-dc5787773b24"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.700431 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" (UID: "a59d1ab7-3891-4385-9f6e-8ca1a0bdf204"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755430 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-utilities\") pod \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755495 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-operator-metrics\") pod \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755528 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-trusted-ca\") pod \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755558 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-utilities\") pod \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755595 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkg55\" (UniqueName: \"kubernetes.io/projected/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-kube-api-access-tkg55\") pod \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755615 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-catalog-content\") pod \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\" (UID: \"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755644 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-catalog-content\") pod \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755668 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwhkv\" (UniqueName: \"kubernetes.io/projected/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-kube-api-access-dwhkv\") pod \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\" (UID: \"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.755723 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgmch\" (UniqueName: \"kubernetes.io/projected/6652f16f-304d-4c4a-84dd-97b68a4aa04b-kube-api-access-hgmch\") pod \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\" (UID: \"6652f16f-304d-4c4a-84dd-97b68a4aa04b\") " Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.756002 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-catalog-content\") on node 
\"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.756019 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9458d30-6f34-4e65-94a3-dc5787773b24-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.756029 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcl4q\" (UniqueName: \"kubernetes.io/projected/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-kube-api-access-jcl4q\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.756039 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.756049 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8srpc\" (UniqueName: \"kubernetes.io/projected/f9458d30-6f34-4e65-94a3-dc5787773b24-kube-api-access-8srpc\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.756665 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "6652f16f-304d-4c4a-84dd-97b68a4aa04b" (UID: "6652f16f-304d-4c4a-84dd-97b68a4aa04b"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.757521 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-utilities" (OuterVolumeSpecName: "utilities") pod "30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" (UID: "30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.758684 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6652f16f-304d-4c4a-84dd-97b68a4aa04b-kube-api-access-hgmch" (OuterVolumeSpecName: "kube-api-access-hgmch") pod "6652f16f-304d-4c4a-84dd-97b68a4aa04b" (UID: "6652f16f-304d-4c4a-84dd-97b68a4aa04b"). InnerVolumeSpecName "kube-api-access-hgmch". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.759366 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-kube-api-access-tkg55" (OuterVolumeSpecName: "kube-api-access-tkg55") pod "79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" (UID: "79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e"). InnerVolumeSpecName "kube-api-access-tkg55". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.759562 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "6652f16f-304d-4c4a-84dd-97b68a4aa04b" (UID: "6652f16f-304d-4c4a-84dd-97b68a4aa04b"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.761135 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-kube-api-access-dwhkv" (OuterVolumeSpecName: "kube-api-access-dwhkv") pod "30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" (UID: "30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6"). InnerVolumeSpecName "kube-api-access-dwhkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.764241 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-utilities" (OuterVolumeSpecName: "utilities") pod "79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" (UID: "79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.830837 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" (UID: "30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.850114 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" (UID: "79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857024 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857056 4818 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857072 4818 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6652f16f-304d-4c4a-84dd-97b68a4aa04b-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857084 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857099 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkg55\" (UniqueName: \"kubernetes.io/projected/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-kube-api-access-tkg55\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857110 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857122 4818 
reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857134 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwhkv\" (UniqueName: \"kubernetes.io/projected/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6-kube-api-access-dwhkv\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.857146 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgmch\" (UniqueName: \"kubernetes.io/projected/6652f16f-304d-4c4a-84dd-97b68a4aa04b-kube-api-access-hgmch\") on node \"crc\" DevicePath \"\"" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.921389 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dhz7n"] Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.938901 4818 generic.go:334] "Generic (PLEG): container finished" podID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerID="c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052" exitCode=0 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.938971 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" event={"ID":"6652f16f-304d-4c4a-84dd-97b68a4aa04b","Type":"ContainerDied","Data":"c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.938998 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" event={"ID":"6652f16f-304d-4c4a-84dd-97b68a4aa04b","Type":"ContainerDied","Data":"a26b25dea10e988a92fa55580a4f9dea9814411db72a37bce317704100d44a37"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.939014 4818 scope.go:117] "RemoveContainer" containerID="c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.939094 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8gcpn" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.942437 4818 generic.go:334] "Generic (PLEG): container finished" podID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerID="9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331" exitCode=0 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.942513 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjn54" event={"ID":"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6","Type":"ContainerDied","Data":"9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.942531 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjn54" event={"ID":"30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6","Type":"ContainerDied","Data":"345be133506c2d6a1e7100bb0dce960f9d716d8041ccbbd40530cba767fb6446"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.942595 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjn54" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.946446 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-44qds" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.946517 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-44qds" event={"ID":"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204","Type":"ContainerDied","Data":"607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.946415 4818 generic.go:334] "Generic (PLEG): container finished" podID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerID="607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be" exitCode=0 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.946766 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-44qds" event={"ID":"a59d1ab7-3891-4385-9f6e-8ca1a0bdf204","Type":"ContainerDied","Data":"19383766f32f8b2f4e07fd5ce53be18e35b73429e9f7b18966d3e058cf90da83"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.949685 4818 generic.go:334] "Generic (PLEG): container finished" podID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerID="324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d" exitCode=0 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.949740 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerDied","Data":"324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.949759 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d4v8q" event={"ID":"f9458d30-6f34-4e65-94a3-dc5787773b24","Type":"ContainerDied","Data":"8379f1140c78b33f549e4e22d2e5df78c7a7a665646fd52bbc7f3f71dd7b0d7d"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.949815 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d4v8q" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.952645 4818 generic.go:334] "Generic (PLEG): container finished" podID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerID="bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69" exitCode=0 Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.952715 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28tlv" event={"ID":"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e","Type":"ContainerDied","Data":"bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.952746 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28tlv" event={"ID":"79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e","Type":"ContainerDied","Data":"ed842e6927800d2079bb5a4719ccd3940c4c1bb672662ed35b440792e2cc4b83"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.952822 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-28tlv" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.954259 4818 scope.go:117] "RemoveContainer" containerID="c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052" Sep 30 17:03:31 crc kubenswrapper[4818]: E0930 17:03:31.954602 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052\": container with ID starting with c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052 not found: ID does not exist" containerID="c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.954637 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052"} err="failed to get container status \"c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052\": rpc error: code = NotFound desc = could not find container \"c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052\": container with ID starting with c12b60eea63bd9f65e5e6121dc1192b5f64948b799427542de01dcd7bc919052 not found: ID does not exist" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.954661 4818 scope.go:117] "RemoveContainer" containerID="9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.955131 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" event={"ID":"9ca14f10-19ae-485b-b237-7a3e0c1c701a","Type":"ContainerStarted","Data":"6c10e974943e170776b913a1cb5264baa45cb062038ffcb863a9cd12d1e6c170"} Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.989862 4818 scope.go:117] "RemoveContainer" containerID="def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc" Sep 30 17:03:31 crc kubenswrapper[4818]: I0930 17:03:31.995298 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjn54"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.011364 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pjn54"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.031207 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" path="/var/lib/kubelet/pods/30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6/volumes" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.032026 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-28tlv"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.032049 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-28tlv"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.034005 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8gcpn"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.035437 4818 scope.go:117] "RemoveContainer" containerID="0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.037375 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8gcpn"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.046069 
4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-44qds"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.050818 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-44qds"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.054148 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d4v8q"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.054769 4818 scope.go:117] "RemoveContainer" containerID="9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.055186 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331\": container with ID starting with 9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331 not found: ID does not exist" containerID="9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.055221 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331"} err="failed to get container status \"9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331\": rpc error: code = NotFound desc = could not find container \"9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331\": container with ID starting with 9d98c65fbae3028bb8bcdec5bf434735c4a85a7f0444af97f8efe7772a1df331 not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.055246 4818 scope.go:117] "RemoveContainer" containerID="def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.055496 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc\": container with ID starting with def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc not found: ID does not exist" containerID="def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.055520 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc"} err="failed to get container status \"def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc\": rpc error: code = NotFound desc = could not find container \"def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc\": container with ID starting with def9270b66a0f6befe6c5f951e7e410c72d1a179948057d8c88b9d05c8b61cdc not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.055538 4818 scope.go:117] "RemoveContainer" containerID="0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.055761 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03\": container with ID starting with 0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03 not found: ID does not exist" 
containerID="0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.055787 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03"} err="failed to get container status \"0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03\": rpc error: code = NotFound desc = could not find container \"0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03\": container with ID starting with 0aaf29a7b67dbab019cfa36bbca6fe77d3121144a2689d280495418c5bff7c03 not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.055803 4818 scope.go:117] "RemoveContainer" containerID="607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.057279 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d4v8q"] Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.071829 4818 scope.go:117] "RemoveContainer" containerID="0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.093602 4818 scope.go:117] "RemoveContainer" containerID="91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.113727 4818 scope.go:117] "RemoveContainer" containerID="607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.114233 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be\": container with ID starting with 607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be not found: ID does not exist" containerID="607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.114297 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be"} err="failed to get container status \"607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be\": rpc error: code = NotFound desc = could not find container \"607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be\": container with ID starting with 607c85da60f3aa78bb6993861caa806330e0376ffb4573e75a38e8388d5ab1be not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.114338 4818 scope.go:117] "RemoveContainer" containerID="0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.114847 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb\": container with ID starting with 0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb not found: ID does not exist" containerID="0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.114883 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb"} err="failed to get container status 
\"0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb\": rpc error: code = NotFound desc = could not find container \"0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb\": container with ID starting with 0c51c03d99ae9b4cc15d85bb44f61dd2fcbc4b68b97f72d68ea1ff042f4714cb not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.114909 4818 scope.go:117] "RemoveContainer" containerID="91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.115467 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040\": container with ID starting with 91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040 not found: ID does not exist" containerID="91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.115507 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040"} err="failed to get container status \"91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040\": rpc error: code = NotFound desc = could not find container \"91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040\": container with ID starting with 91176e260618b8d89b9d65ff9dc5491a72d3d601758a2b0a40825356ddaad040 not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.115533 4818 scope.go:117] "RemoveContainer" containerID="324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.129675 4818 scope.go:117] "RemoveContainer" containerID="acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.142219 4818 scope.go:117] "RemoveContainer" containerID="0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.156658 4818 scope.go:117] "RemoveContainer" containerID="324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.159195 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d\": container with ID starting with 324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d not found: ID does not exist" containerID="324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.159251 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d"} err="failed to get container status \"324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d\": rpc error: code = NotFound desc = could not find container \"324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d\": container with ID starting with 324008f665adfa1c4f900af7dfb35228c3f6aa8d4163663e39bcd9e3f85b519d not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.159293 4818 scope.go:117] "RemoveContainer" containerID="acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87" Sep 30 17:03:32 crc 
kubenswrapper[4818]: E0930 17:03:32.160545 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87\": container with ID starting with acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87 not found: ID does not exist" containerID="acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.160591 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87"} err="failed to get container status \"acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87\": rpc error: code = NotFound desc = could not find container \"acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87\": container with ID starting with acaf311fc4ece5a982cd30064a8cdcef64e5c66e10c2d2c38e23978e11665e87 not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.160604 4818 scope.go:117] "RemoveContainer" containerID="0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.161051 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c\": container with ID starting with 0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c not found: ID does not exist" containerID="0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.161090 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c"} err="failed to get container status \"0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c\": rpc error: code = NotFound desc = could not find container \"0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c\": container with ID starting with 0a2acdf55b6d292cefb8e9c3ee0438bbe487750e3f6104823db481eede61699c not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.161127 4818 scope.go:117] "RemoveContainer" containerID="bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.177914 4818 scope.go:117] "RemoveContainer" containerID="09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.237687 4818 scope.go:117] "RemoveContainer" containerID="a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.252865 4818 scope.go:117] "RemoveContainer" containerID="bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.253429 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69\": container with ID starting with bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69 not found: ID does not exist" containerID="bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.253471 4818 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69"} err="failed to get container status \"bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69\": rpc error: code = NotFound desc = could not find container \"bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69\": container with ID starting with bf192f9e98e085956984b811e319edf3973af02e55777c492b5e0915d55c7d69 not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.253497 4818 scope.go:117] "RemoveContainer" containerID="09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.253876 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104\": container with ID starting with 09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104 not found: ID does not exist" containerID="09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.253933 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104"} err="failed to get container status \"09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104\": rpc error: code = NotFound desc = could not find container \"09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104\": container with ID starting with 09d212b0894f13a46ae3ae2f0f568ec2d3138c8b492403288c65ebc48549f104 not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.253960 4818 scope.go:117] "RemoveContainer" containerID="a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4" Sep 30 17:03:32 crc kubenswrapper[4818]: E0930 17:03:32.254912 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4\": container with ID starting with a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4 not found: ID does not exist" containerID="a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.254981 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4"} err="failed to get container status \"a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4\": rpc error: code = NotFound desc = could not find container \"a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4\": container with ID starting with a3eafb37892b6da074cdb6dca6bd7e139a6a80d0640fb1077fb90f503a24fbd4 not found: ID does not exist" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.966448 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" event={"ID":"9ca14f10-19ae-485b-b237-7a3e0c1c701a","Type":"ContainerStarted","Data":"1739e0a124f2e15ae0ee6a3bcf4a50b7c1376e54243428d264709d0c6c0eeeb4"} Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.966981 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 
Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.966448 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" event={"ID":"9ca14f10-19ae-485b-b237-7a3e0c1c701a","Type":"ContainerStarted","Data":"1739e0a124f2e15ae0ee6a3bcf4a50b7c1376e54243428d264709d0c6c0eeeb4"}
Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.966981 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n"
Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.982798 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n"
Sep 30 17:03:32 crc kubenswrapper[4818]: I0930 17:03:32.988976 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-dhz7n" podStartSLOduration=1.9889100640000001 podStartE2EDuration="1.988910064s" podCreationTimestamp="2025-09-30 17:03:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:03:32.985308462 +0000 UTC m=+259.739580348" watchObservedRunningTime="2025-09-30 17:03:32.988910064 +0000 UTC m=+259.743181920"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264153 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fmv9v"]
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264327 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264338 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264346 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264351 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264358 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264364 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264373 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerName="marketplace-operator"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264378 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerName="marketplace-operator"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264386 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264392 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264399 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264404 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264413 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264419 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264425 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264430 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264440 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264445 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264452 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264457 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="extract-content"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264464 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264470 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264480 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264487 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="extract-utilities"
Sep 30 17:03:33 crc kubenswrapper[4818]: E0930 17:03:33.264495 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264500 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264572 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" containerName="marketplace-operator"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264581 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264589 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264598 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="30e8e6d1-50e4-46a9-b0b7-ef433e70a2a6" containerName="registry-server"
Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.264609 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" containerName="registry-server"
containerName="registry-server" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.265212 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.267401 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.277898 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fmv9v"] Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.370532 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrpp5\" (UniqueName: \"kubernetes.io/projected/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-kube-api-access-rrpp5\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.370723 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-catalog-content\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.370833 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-utilities\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.471754 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-catalog-content\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.472162 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-utilities\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.472363 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrpp5\" (UniqueName: \"kubernetes.io/projected/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-kube-api-access-rrpp5\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.472550 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-catalog-content\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.472587 4818 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-5sl7h"] Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.472900 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-utilities\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.474513 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.480608 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.495749 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrpp5\" (UniqueName: \"kubernetes.io/projected/60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c-kube-api-access-rrpp5\") pod \"redhat-marketplace-fmv9v\" (UID: \"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c\") " pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.504881 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5sl7h"] Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.574263 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q22q\" (UniqueName: \"kubernetes.io/projected/8b734a68-025d-47ea-99d0-bf680e9e54cd-kube-api-access-6q22q\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.574330 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b734a68-025d-47ea-99d0-bf680e9e54cd-utilities\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.574420 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b734a68-025d-47ea-99d0-bf680e9e54cd-catalog-content\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.583123 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.675345 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b734a68-025d-47ea-99d0-bf680e9e54cd-catalog-content\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.675716 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q22q\" (UniqueName: \"kubernetes.io/projected/8b734a68-025d-47ea-99d0-bf680e9e54cd-kube-api-access-6q22q\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.675757 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b734a68-025d-47ea-99d0-bf680e9e54cd-utilities\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.676260 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b734a68-025d-47ea-99d0-bf680e9e54cd-catalog-content\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.676461 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b734a68-025d-47ea-99d0-bf680e9e54cd-utilities\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.699764 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q22q\" (UniqueName: \"kubernetes.io/projected/8b734a68-025d-47ea-99d0-bf680e9e54cd-kube-api-access-6q22q\") pod \"certified-operators-5sl7h\" (UID: \"8b734a68-025d-47ea-99d0-bf680e9e54cd\") " pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.831440 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:33 crc kubenswrapper[4818]: I0930 17:03:33.992814 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fmv9v"] Sep 30 17:03:34 crc kubenswrapper[4818]: W0930 17:03:34.006699 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod60d6fa53_a6a2_4a62_97a6_5cbe3c1b222c.slice/crio-481c8ecb516200d7f735cca3a82f533da6157b0bfceb180176fb9ea51e41f3cd WatchSource:0}: Error finding container 481c8ecb516200d7f735cca3a82f533da6157b0bfceb180176fb9ea51e41f3cd: Status 404 returned error can't find the container with id 481c8ecb516200d7f735cca3a82f533da6157b0bfceb180176fb9ea51e41f3cd Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.034862 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6652f16f-304d-4c4a-84dd-97b68a4aa04b" path="/var/lib/kubelet/pods/6652f16f-304d-4c4a-84dd-97b68a4aa04b/volumes" Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.036944 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e" path="/var/lib/kubelet/pods/79f4e678-c1bf-4bd3-b2b7-cf5efcd6df2e/volumes" Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.037641 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a59d1ab7-3891-4385-9f6e-8ca1a0bdf204" path="/var/lib/kubelet/pods/a59d1ab7-3891-4385-9f6e-8ca1a0bdf204/volumes" Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.038845 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9458d30-6f34-4e65-94a3-dc5787773b24" path="/var/lib/kubelet/pods/f9458d30-6f34-4e65-94a3-dc5787773b24/volumes" Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.072761 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5sl7h"] Sep 30 17:03:34 crc kubenswrapper[4818]: W0930 17:03:34.080144 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b734a68_025d_47ea_99d0_bf680e9e54cd.slice/crio-03a4fb784e11b45b1c243c27dfe08bd594494ac10b31ba329af785bcb667127e WatchSource:0}: Error finding container 03a4fb784e11b45b1c243c27dfe08bd594494ac10b31ba329af785bcb667127e: Status 404 returned error can't find the container with id 03a4fb784e11b45b1c243c27dfe08bd594494ac10b31ba329af785bcb667127e Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.987538 4818 generic.go:334] "Generic (PLEG): container finished" podID="60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c" containerID="eaa9d2dff2764c073b99a06c6c86fe1d37d05dad5972a7fd87e4c44602cadfa9" exitCode=0 Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.987615 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fmv9v" event={"ID":"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c","Type":"ContainerDied","Data":"eaa9d2dff2764c073b99a06c6c86fe1d37d05dad5972a7fd87e4c44602cadfa9"} Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.988089 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fmv9v" event={"ID":"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c","Type":"ContainerStarted","Data":"481c8ecb516200d7f735cca3a82f533da6157b0bfceb180176fb9ea51e41f3cd"} Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.990850 4818 generic.go:334] "Generic (PLEG): container finished" 
podID="8b734a68-025d-47ea-99d0-bf680e9e54cd" containerID="6950ce3d21fc1d81b50a6e19440907c58173a1cac888248b6bf6d966c1e9df4d" exitCode=0 Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.990915 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5sl7h" event={"ID":"8b734a68-025d-47ea-99d0-bf680e9e54cd","Type":"ContainerDied","Data":"6950ce3d21fc1d81b50a6e19440907c58173a1cac888248b6bf6d966c1e9df4d"} Sep 30 17:03:34 crc kubenswrapper[4818]: I0930 17:03:34.990975 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5sl7h" event={"ID":"8b734a68-025d-47ea-99d0-bf680e9e54cd","Type":"ContainerStarted","Data":"03a4fb784e11b45b1c243c27dfe08bd594494ac10b31ba329af785bcb667127e"} Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.668097 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rdxdl"] Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.670749 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.673808 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rdxdl"] Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.721062 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.821916 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2wl6\" (UniqueName: \"kubernetes.io/projected/838572b4-547f-482a-8a5f-deb28aa2e587-kube-api-access-r2wl6\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.822113 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/838572b4-547f-482a-8a5f-deb28aa2e587-catalog-content\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.822269 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/838572b4-547f-482a-8a5f-deb28aa2e587-utilities\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.861053 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-skzzr"] Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.862010 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.864251 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.878750 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-skzzr"] Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.923894 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2wl6\" (UniqueName: \"kubernetes.io/projected/838572b4-547f-482a-8a5f-deb28aa2e587-kube-api-access-r2wl6\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.924070 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qz2p\" (UniqueName: \"kubernetes.io/projected/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-kube-api-access-2qz2p\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.924125 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/838572b4-547f-482a-8a5f-deb28aa2e587-catalog-content\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.924189 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/838572b4-547f-482a-8a5f-deb28aa2e587-utilities\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.924255 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-utilities\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.924281 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-catalog-content\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.924636 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/838572b4-547f-482a-8a5f-deb28aa2e587-catalog-content\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.924685 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/838572b4-547f-482a-8a5f-deb28aa2e587-utilities\") pod \"community-operators-rdxdl\" (UID: 
\"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.942003 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2wl6\" (UniqueName: \"kubernetes.io/projected/838572b4-547f-482a-8a5f-deb28aa2e587-kube-api-access-r2wl6\") pod \"community-operators-rdxdl\" (UID: \"838572b4-547f-482a-8a5f-deb28aa2e587\") " pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.998124 4818 generic.go:334] "Generic (PLEG): container finished" podID="60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c" containerID="4b76e96e3125e42f6b490462b698bab28d28fedc501b684fba3a193139578151" exitCode=0 Sep 30 17:03:35 crc kubenswrapper[4818]: I0930 17:03:35.998178 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fmv9v" event={"ID":"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c","Type":"ContainerDied","Data":"4b76e96e3125e42f6b490462b698bab28d28fedc501b684fba3a193139578151"} Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.024975 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-utilities\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.025298 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-catalog-content\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.025345 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qz2p\" (UniqueName: \"kubernetes.io/projected/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-kube-api-access-2qz2p\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.025832 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-utilities\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.025960 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-catalog-content\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.037465 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.057470 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qz2p\" (UniqueName: \"kubernetes.io/projected/9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9-kube-api-access-2qz2p\") pod \"redhat-operators-skzzr\" (UID: \"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9\") " pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.185766 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.473980 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rdxdl"] Sep 30 17:03:36 crc kubenswrapper[4818]: W0930 17:03:36.488271 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod838572b4_547f_482a_8a5f_deb28aa2e587.slice/crio-6a3a273af3c23d102130b053e0d90083c0ee35c3d4620fd64123950b586a0337 WatchSource:0}: Error finding container 6a3a273af3c23d102130b053e0d90083c0ee35c3d4620fd64123950b586a0337: Status 404 returned error can't find the container with id 6a3a273af3c23d102130b053e0d90083c0ee35c3d4620fd64123950b586a0337 Sep 30 17:03:36 crc kubenswrapper[4818]: I0930 17:03:36.586190 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-skzzr"] Sep 30 17:03:36 crc kubenswrapper[4818]: W0930 17:03:36.610999 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ae4ca0e_f830_45ee_bdfb_7e97f5bf28c9.slice/crio-c789fb0697379238b983d21762bbd4e9e028dea047290977a2f3a1487b7091c2 WatchSource:0}: Error finding container c789fb0697379238b983d21762bbd4e9e028dea047290977a2f3a1487b7091c2: Status 404 returned error can't find the container with id c789fb0697379238b983d21762bbd4e9e028dea047290977a2f3a1487b7091c2 Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.005785 4818 generic.go:334] "Generic (PLEG): container finished" podID="838572b4-547f-482a-8a5f-deb28aa2e587" containerID="be15f5f23ae06fd490c249cca99ff97a7cc2a246a0e9364ccf8553dbf27fb7e0" exitCode=0 Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.005839 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rdxdl" event={"ID":"838572b4-547f-482a-8a5f-deb28aa2e587","Type":"ContainerDied","Data":"be15f5f23ae06fd490c249cca99ff97a7cc2a246a0e9364ccf8553dbf27fb7e0"} Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.006351 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rdxdl" event={"ID":"838572b4-547f-482a-8a5f-deb28aa2e587","Type":"ContainerStarted","Data":"6a3a273af3c23d102130b053e0d90083c0ee35c3d4620fd64123950b586a0337"} Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.011528 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fmv9v" event={"ID":"60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c","Type":"ContainerStarted","Data":"979a372af8591789a1e3509bf371071722eda0bef8694e2c07d25d0dd8eed452"} Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.014583 4818 generic.go:334] "Generic (PLEG): container finished" podID="8b734a68-025d-47ea-99d0-bf680e9e54cd" containerID="10bd7cc0ccec02ba8109b510fd9b01419ce4f614d4c2bfe83cb78a46e8ae927d" 
exitCode=0 Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.014639 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5sl7h" event={"ID":"8b734a68-025d-47ea-99d0-bf680e9e54cd","Type":"ContainerDied","Data":"10bd7cc0ccec02ba8109b510fd9b01419ce4f614d4c2bfe83cb78a46e8ae927d"} Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.016290 4818 generic.go:334] "Generic (PLEG): container finished" podID="9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9" containerID="93fba7154bd0adf5cc659ba2a9104313927d6d95df5e5bf9b8e59a4b65d275b5" exitCode=0 Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.016340 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-skzzr" event={"ID":"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9","Type":"ContainerDied","Data":"93fba7154bd0adf5cc659ba2a9104313927d6d95df5e5bf9b8e59a4b65d275b5"} Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.016354 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-skzzr" event={"ID":"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9","Type":"ContainerStarted","Data":"c789fb0697379238b983d21762bbd4e9e028dea047290977a2f3a1487b7091c2"} Sep 30 17:03:37 crc kubenswrapper[4818]: I0930 17:03:37.099104 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fmv9v" podStartSLOduration=2.657663216 podStartE2EDuration="4.099089029s" podCreationTimestamp="2025-09-30 17:03:33 +0000 UTC" firstStartedPulling="2025-09-30 17:03:34.989472104 +0000 UTC m=+261.743743920" lastFinishedPulling="2025-09-30 17:03:36.430897917 +0000 UTC m=+263.185169733" observedRunningTime="2025-09-30 17:03:37.096150938 +0000 UTC m=+263.850422764" watchObservedRunningTime="2025-09-30 17:03:37.099089029 +0000 UTC m=+263.853360845" Sep 30 17:03:38 crc kubenswrapper[4818]: I0930 17:03:38.026872 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5sl7h" event={"ID":"8b734a68-025d-47ea-99d0-bf680e9e54cd","Type":"ContainerStarted","Data":"93e0516f6876aef39dd799214f07666e9016bbc6dcf4957a32e1ab2b2398bad5"} Sep 30 17:03:38 crc kubenswrapper[4818]: I0930 17:03:38.028215 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rdxdl" event={"ID":"838572b4-547f-482a-8a5f-deb28aa2e587","Type":"ContainerStarted","Data":"4f01d3284f1f1e5ae8667b1be237854c4e5d545883327f91347fbe668228cb3b"} Sep 30 17:03:38 crc kubenswrapper[4818]: I0930 17:03:38.040366 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5sl7h" podStartSLOduration=2.390607572 podStartE2EDuration="5.040349122s" podCreationTimestamp="2025-09-30 17:03:33 +0000 UTC" firstStartedPulling="2025-09-30 17:03:34.994210455 +0000 UTC m=+261.748482271" lastFinishedPulling="2025-09-30 17:03:37.643952005 +0000 UTC m=+264.398223821" observedRunningTime="2025-09-30 17:03:38.038714887 +0000 UTC m=+264.792986703" watchObservedRunningTime="2025-09-30 17:03:38.040349122 +0000 UTC m=+264.794620938" Sep 30 17:03:39 crc kubenswrapper[4818]: I0930 17:03:39.044812 4818 generic.go:334] "Generic (PLEG): container finished" podID="9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9" containerID="ccbf4d3eed273dbb16f8b9b5699fae4233c9e64599da1c073dccc4736e6fd9f0" exitCode=0 Sep 30 17:03:39 crc kubenswrapper[4818]: I0930 17:03:39.044955 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
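
The startup-duration entries above appear to follow a simple relation: podStartSLOduration is the end-to-end startup time minus the image-pull window, which checks out numerically for redhat-marketplace-fmv9v (4.099089029s minus the 1.441425813s pull window is 2.657663216s). A back-of-the-envelope verification using the log's own timestamps (this is a sanity check, not kubelet source):

// Recompute the tracker's numbers for redhat-marketplace-fmv9v from the log above.
package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339, "2025-09-30T17:03:33Z")
	running, _ := time.Parse(time.RFC3339Nano, "2025-09-30T17:03:37.099089029Z")
	pullStart, _ := time.Parse(time.RFC3339Nano, "2025-09-30T17:03:34.989472104Z")
	pullEnd, _ := time.Parse(time.RFC3339Nano, "2025-09-30T17:03:36.430897917Z")

	e2e := running.Sub(created)         // podStartE2EDuration = 4.099089029s
	slo := e2e - pullEnd.Sub(pullStart) // podStartSLOduration = 2.657663216s
	fmt.Println("e2e:", e2e, "slo:", slo)
}

The marketplace-operator entry earlier is consistent with this reading: its pull timestamps are the zero time, so its SLO and E2E durations are equal.
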
pod="openshift-marketplace/redhat-operators-skzzr" event={"ID":"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9","Type":"ContainerDied","Data":"ccbf4d3eed273dbb16f8b9b5699fae4233c9e64599da1c073dccc4736e6fd9f0"} Sep 30 17:03:39 crc kubenswrapper[4818]: I0930 17:03:39.050714 4818 generic.go:334] "Generic (PLEG): container finished" podID="838572b4-547f-482a-8a5f-deb28aa2e587" containerID="4f01d3284f1f1e5ae8667b1be237854c4e5d545883327f91347fbe668228cb3b" exitCode=0 Sep 30 17:03:39 crc kubenswrapper[4818]: I0930 17:03:39.052835 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rdxdl" event={"ID":"838572b4-547f-482a-8a5f-deb28aa2e587","Type":"ContainerDied","Data":"4f01d3284f1f1e5ae8667b1be237854c4e5d545883327f91347fbe668228cb3b"} Sep 30 17:03:40 crc kubenswrapper[4818]: I0930 17:03:40.056490 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-skzzr" event={"ID":"9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9","Type":"ContainerStarted","Data":"80da16d32ca93211ebc3306f34aeb610ae0448b944f1b56fd52c9e72fea59d5e"} Sep 30 17:03:40 crc kubenswrapper[4818]: I0930 17:03:40.058674 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rdxdl" event={"ID":"838572b4-547f-482a-8a5f-deb28aa2e587","Type":"ContainerStarted","Data":"78b301c53cdb6f260aa874988ed88d16eb5a744f279414d1dd137edd13f1cd7b"} Sep 30 17:03:40 crc kubenswrapper[4818]: I0930 17:03:40.075403 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-skzzr" podStartSLOduration=2.369323651 podStartE2EDuration="5.075379667s" podCreationTimestamp="2025-09-30 17:03:35 +0000 UTC" firstStartedPulling="2025-09-30 17:03:37.017425784 +0000 UTC m=+263.771697610" lastFinishedPulling="2025-09-30 17:03:39.72348181 +0000 UTC m=+266.477753626" observedRunningTime="2025-09-30 17:03:40.069978368 +0000 UTC m=+266.824250194" watchObservedRunningTime="2025-09-30 17:03:40.075379667 +0000 UTC m=+266.829651483" Sep 30 17:03:40 crc kubenswrapper[4818]: I0930 17:03:40.093915 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rdxdl" podStartSLOduration=2.6018727520000002 podStartE2EDuration="5.093895798s" podCreationTimestamp="2025-09-30 17:03:35 +0000 UTC" firstStartedPulling="2025-09-30 17:03:37.007358696 +0000 UTC m=+263.761630522" lastFinishedPulling="2025-09-30 17:03:39.499381742 +0000 UTC m=+266.253653568" observedRunningTime="2025-09-30 17:03:40.089899108 +0000 UTC m=+266.844170924" watchObservedRunningTime="2025-09-30 17:03:40.093895798 +0000 UTC m=+266.848167614" Sep 30 17:03:43 crc kubenswrapper[4818]: I0930 17:03:43.583708 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:43 crc kubenswrapper[4818]: I0930 17:03:43.584698 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:43 crc kubenswrapper[4818]: I0930 17:03:43.640407 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:43 crc kubenswrapper[4818]: I0930 17:03:43.832284 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:43 crc kubenswrapper[4818]: I0930 17:03:43.832323 4818 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:43 crc kubenswrapper[4818]: I0930 17:03:43.868694 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:44 crc kubenswrapper[4818]: I0930 17:03:44.120219 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fmv9v" Sep 30 17:03:44 crc kubenswrapper[4818]: I0930 17:03:44.135956 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5sl7h" Sep 30 17:03:46 crc kubenswrapper[4818]: I0930 17:03:46.037634 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:46 crc kubenswrapper[4818]: I0930 17:03:46.037741 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:46 crc kubenswrapper[4818]: I0930 17:03:46.103051 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:46 crc kubenswrapper[4818]: I0930 17:03:46.145557 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rdxdl" Sep 30 17:03:46 crc kubenswrapper[4818]: I0930 17:03:46.186378 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:46 crc kubenswrapper[4818]: I0930 17:03:46.186437 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:46 crc kubenswrapper[4818]: I0930 17:03:46.242010 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:03:47 crc kubenswrapper[4818]: I0930 17:03:47.172593 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-skzzr" Sep 30 17:04:52 crc kubenswrapper[4818]: I0930 17:04:52.596632 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:04:52 crc kubenswrapper[4818]: I0930 17:04:52.598056 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:05:22 crc kubenswrapper[4818]: I0930 17:05:22.596399 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:05:22 crc kubenswrapper[4818]: I0930 17:05:22.597068 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.595998 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.598169 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.598363 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.599540 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f602cf125807b205ed113b667482f15568000c1fc70e3c7e75eecadfe6a02087"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.599844 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://f602cf125807b205ed113b667482f15568000c1fc70e3c7e75eecadfe6a02087" gracePeriod=600 Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.937964 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="f602cf125807b205ed113b667482f15568000c1fc70e3c7e75eecadfe6a02087" exitCode=0 Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.938060 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"f602cf125807b205ed113b667482f15568000c1fc70e3c7e75eecadfe6a02087"} Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.938521 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"976854ec79c20d7638f4498aeb9ec7b57e80f726c0c53aeb06c9052ad1713c03"} Sep 30 17:05:52 crc kubenswrapper[4818]: I0930 17:05:52.938560 4818 scope.go:117] "RemoveContainer" containerID="9a8974839d1c7be4e1d84efc973a8358d8fb4586bdd00aeab495f08ef2f3051b" Sep 30 17:06:06 crc kubenswrapper[4818]: I0930 17:06:06.924510 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-42prh"] Sep 30 17:06:06 crc kubenswrapper[4818]: I0930 17:06:06.926018 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:06 crc kubenswrapper[4818]: I0930 17:06:06.946219 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-42prh"] Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064118 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-installation-pull-secrets\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064210 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-registry-tls\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064283 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-ca-trust-extracted\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064345 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-bound-sa-token\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064385 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-registry-certificates\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064403 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-trusted-ca\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064440 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cq5h\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-kube-api-access-5cq5h\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.064497 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.093526 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.166255 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-registry-tls\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.166326 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-ca-trust-extracted\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.166388 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-bound-sa-token\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.166450 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-registry-certificates\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.166486 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-trusted-ca\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.166521 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cq5h\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-kube-api-access-5cq5h\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.166624 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-installation-pull-secrets\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.167845 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-ca-trust-extracted\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.168356 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-trusted-ca\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.168557 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-registry-certificates\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.173389 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-registry-tls\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.173759 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-installation-pull-secrets\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.184637 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cq5h\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-kube-api-access-5cq5h\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.188346 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc-bound-sa-token\") pod \"image-registry-66df7c8f76-42prh\" (UID: \"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc\") " pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.244398 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:07 crc kubenswrapper[4818]: I0930 17:06:07.523767 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-42prh"] Sep 30 17:06:08 crc kubenswrapper[4818]: I0930 17:06:08.047611 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-42prh" event={"ID":"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc","Type":"ContainerStarted","Data":"eb15330189a32be6a4c836db6555b4911f9dd89b6f5d223c3383259d0a9e39be"} Sep 30 17:06:08 crc kubenswrapper[4818]: I0930 17:06:08.047657 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-42prh" event={"ID":"ab2bfcb1-7b0e-4c4d-8fc2-215f6cb159cc","Type":"ContainerStarted","Data":"9d210c51e1e52eb8ee5346866da68ed0269400c522eecd7a8e7d1996ae9d5e7e"} Sep 30 17:06:08 crc kubenswrapper[4818]: I0930 17:06:08.047792 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:08 crc kubenswrapper[4818]: I0930 17:06:08.070369 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-42prh" podStartSLOduration=2.070346364 podStartE2EDuration="2.070346364s" podCreationTimestamp="2025-09-30 17:06:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:06:08.064223988 +0000 UTC m=+414.818495804" watchObservedRunningTime="2025-09-30 17:06:08.070346364 +0000 UTC m=+414.824618200" Sep 30 17:06:27 crc kubenswrapper[4818]: I0930 17:06:27.253186 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-42prh" Sep 30 17:06:27 crc kubenswrapper[4818]: I0930 17:06:27.333626 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kml8f"] Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.379553 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" podUID="0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" containerName="registry" containerID="cri-o://f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8" gracePeriod=30 Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.801906 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.873199 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-certificates\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.873246 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-trusted-ca\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.873277 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-installation-pull-secrets\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.874178 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-tls\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.874414 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.874449 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-ca-trust-extracted\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.874480 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-bound-sa-token\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.875954 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.879287 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.881121 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdxw6\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-kube-api-access-zdxw6\") pod \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\" (UID: \"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb\") " Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.881715 4818 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-certificates\") on node \"crc\" DevicePath \"\"" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.881771 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.882235 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.885273 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.887556 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.888784 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-kube-api-access-zdxw6" (OuterVolumeSpecName: "kube-api-access-zdxw6") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "kube-api-access-zdxw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.894809 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.897129 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" (UID: "0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.983109 4818 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.983158 4818 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.983173 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdxw6\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-kube-api-access-zdxw6\") on node \"crc\" DevicePath \"\"" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.983191 4818 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Sep 30 17:06:52 crc kubenswrapper[4818]: I0930 17:06:52.983212 4818 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb-registry-tls\") on node \"crc\" DevicePath \"\"" Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.334014 4818 generic.go:334] "Generic (PLEG): container finished" podID="0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" containerID="f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8" exitCode=0 Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.334061 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" event={"ID":"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb","Type":"ContainerDied","Data":"f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8"} Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.334092 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" event={"ID":"0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb","Type":"ContainerDied","Data":"cec52efde7303d349c8fd497103eb183b125578020f9ec34f8dda83fe6decb98"} Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.334117 4818 scope.go:117] "RemoveContainer" containerID="f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8" Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.334114 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kml8f" Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.374568 4818 scope.go:117] "RemoveContainer" containerID="f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8" Sep 30 17:06:53 crc kubenswrapper[4818]: E0930 17:06:53.375749 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8\": container with ID starting with f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8 not found: ID does not exist" containerID="f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8" Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.375999 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8"} err="failed to get container status \"f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8\": rpc error: code = NotFound desc = could not find container \"f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8\": container with ID starting with f3f918c7fd7a129ff7a0cf4d1ea50520fa54c7862d90ddc903d51cdb4e7d5dc8 not found: ID does not exist" Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.398672 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kml8f"] Sep 30 17:06:53 crc kubenswrapper[4818]: I0930 17:06:53.402173 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kml8f"] Sep 30 17:06:54 crc kubenswrapper[4818]: I0930 17:06:54.033114 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" path="/var/lib/kubelet/pods/0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb/volumes" Sep 30 17:07:52 crc kubenswrapper[4818]: I0930 17:07:52.596430 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:07:52 crc kubenswrapper[4818]: I0930 17:07:52.597182 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:08:22 crc kubenswrapper[4818]: I0930 17:08:22.595642 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:08:22 crc kubenswrapper[4818]: I0930 17:08:22.596407 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:08:52 crc kubenswrapper[4818]: I0930 17:08:52.596173 4818 patch_prober.go:28] interesting 
pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:08:52 crc kubenswrapper[4818]: I0930 17:08:52.596718 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:08:52 crc kubenswrapper[4818]: I0930 17:08:52.596765 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 17:08:52 crc kubenswrapper[4818]: I0930 17:08:52.624344 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"976854ec79c20d7638f4498aeb9ec7b57e80f726c0c53aeb06c9052ad1713c03"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 30 17:08:52 crc kubenswrapper[4818]: I0930 17:08:52.641540 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://976854ec79c20d7638f4498aeb9ec7b57e80f726c0c53aeb06c9052ad1713c03" gracePeriod=600 Sep 30 17:08:53 crc kubenswrapper[4818]: I0930 17:08:53.085540 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="976854ec79c20d7638f4498aeb9ec7b57e80f726c0c53aeb06c9052ad1713c03" exitCode=0 Sep 30 17:08:53 crc kubenswrapper[4818]: I0930 17:08:53.085850 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"976854ec79c20d7638f4498aeb9ec7b57e80f726c0c53aeb06c9052ad1713c03"} Sep 30 17:08:53 crc kubenswrapper[4818]: I0930 17:08:53.085887 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"f2e60af7181a017f3a998586cfb2fbfcd7d49b22c87395265f8c90eee19ee429"} Sep 30 17:08:53 crc kubenswrapper[4818]: I0930 17:08:53.085917 4818 scope.go:117] "RemoveContainer" containerID="f602cf125807b205ed113b667482f15568000c1fc70e3c7e75eecadfe6a02087" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.686080 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm"] Sep 30 17:09:17 crc kubenswrapper[4818]: E0930 17:09:17.686962 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" containerName="registry" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.686982 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" containerName="registry" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.687146 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e5fa0dd-8926-45e0-a31d-e6db6f1bdebb" 
containerName="registry" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.688360 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.691473 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.699589 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm"] Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.739747 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.739839 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llwzd\" (UniqueName: \"kubernetes.io/projected/9088c828-53ab-421b-8509-c350596da888-kube-api-access-llwzd\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.739883 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.841433 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.841535 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llwzd\" (UniqueName: \"kubernetes.io/projected/9088c828-53ab-421b-8509-c350596da888-kube-api-access-llwzd\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.841575 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.842848 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.843206 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:17 crc kubenswrapper[4818]: I0930 17:09:17.867474 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llwzd\" (UniqueName: \"kubernetes.io/projected/9088c828-53ab-421b-8509-c350596da888-kube-api-access-llwzd\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:18 crc kubenswrapper[4818]: I0930 17:09:18.002592 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:18 crc kubenswrapper[4818]: I0930 17:09:18.255113 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm"] Sep 30 17:09:19 crc kubenswrapper[4818]: I0930 17:09:19.254606 4818 generic.go:334] "Generic (PLEG): container finished" podID="9088c828-53ab-421b-8509-c350596da888" containerID="72a9df4b05f0ec7cec1786384a6e37dc3467672aa6dfcfad7f69536e725a2650" exitCode=0 Sep 30 17:09:19 crc kubenswrapper[4818]: I0930 17:09:19.254686 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" event={"ID":"9088c828-53ab-421b-8509-c350596da888","Type":"ContainerDied","Data":"72a9df4b05f0ec7cec1786384a6e37dc3467672aa6dfcfad7f69536e725a2650"} Sep 30 17:09:19 crc kubenswrapper[4818]: I0930 17:09:19.254746 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" event={"ID":"9088c828-53ab-421b-8509-c350596da888","Type":"ContainerStarted","Data":"759fbda04e079440ce29bfb6aa4a86273a4c387f3fa08239ef951d47c19fa070"} Sep 30 17:09:19 crc kubenswrapper[4818]: I0930 17:09:19.257590 4818 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 30 17:09:21 crc kubenswrapper[4818]: E0930 17:09:21.998001 4818 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9088c828_53ab_421b_8509_c350596da888.slice/crio-2f249983c2824221f815b21d2a4409485a8791906711dd0cbeb47bbfe0c7cb39.scope\": RecentStats: unable to find data in memory cache]" Sep 30 17:09:22 crc kubenswrapper[4818]: I0930 17:09:22.271738 4818 generic.go:334] "Generic (PLEG): container finished" podID="9088c828-53ab-421b-8509-c350596da888" containerID="2f249983c2824221f815b21d2a4409485a8791906711dd0cbeb47bbfe0c7cb39" exitCode=0 Sep 
30 17:09:22 crc kubenswrapper[4818]: I0930 17:09:22.271824 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" event={"ID":"9088c828-53ab-421b-8509-c350596da888","Type":"ContainerDied","Data":"2f249983c2824221f815b21d2a4409485a8791906711dd0cbeb47bbfe0c7cb39"} Sep 30 17:09:23 crc kubenswrapper[4818]: I0930 17:09:23.281407 4818 generic.go:334] "Generic (PLEG): container finished" podID="9088c828-53ab-421b-8509-c350596da888" containerID="c944216fe0170387bfcd44a5fef81b2eb122755d715c8094c12bce34c92c22d1" exitCode=0 Sep 30 17:09:23 crc kubenswrapper[4818]: I0930 17:09:23.281466 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" event={"ID":"9088c828-53ab-421b-8509-c350596da888","Type":"ContainerDied","Data":"c944216fe0170387bfcd44a5fef81b2eb122755d715c8094c12bce34c92c22d1"} Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.608341 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.728660 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-util\") pod \"9088c828-53ab-421b-8509-c350596da888\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.728754 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llwzd\" (UniqueName: \"kubernetes.io/projected/9088c828-53ab-421b-8509-c350596da888-kube-api-access-llwzd\") pod \"9088c828-53ab-421b-8509-c350596da888\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.728798 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-bundle\") pod \"9088c828-53ab-421b-8509-c350596da888\" (UID: \"9088c828-53ab-421b-8509-c350596da888\") " Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.732741 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-bundle" (OuterVolumeSpecName: "bundle") pod "9088c828-53ab-421b-8509-c350596da888" (UID: "9088c828-53ab-421b-8509-c350596da888"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.737952 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9088c828-53ab-421b-8509-c350596da888-kube-api-access-llwzd" (OuterVolumeSpecName: "kube-api-access-llwzd") pod "9088c828-53ab-421b-8509-c350596da888" (UID: "9088c828-53ab-421b-8509-c350596da888"). InnerVolumeSpecName "kube-api-access-llwzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.742173 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-util" (OuterVolumeSpecName: "util") pod "9088c828-53ab-421b-8509-c350596da888" (UID: "9088c828-53ab-421b-8509-c350596da888"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.830815 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llwzd\" (UniqueName: \"kubernetes.io/projected/9088c828-53ab-421b-8509-c350596da888-kube-api-access-llwzd\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.830865 4818 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:24 crc kubenswrapper[4818]: I0930 17:09:24.830885 4818 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9088c828-53ab-421b-8509-c350596da888-util\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:25 crc kubenswrapper[4818]: I0930 17:09:25.298951 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" event={"ID":"9088c828-53ab-421b-8509-c350596da888","Type":"ContainerDied","Data":"759fbda04e079440ce29bfb6aa4a86273a4c387f3fa08239ef951d47c19fa070"} Sep 30 17:09:25 crc kubenswrapper[4818]: I0930 17:09:25.299007 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="759fbda04e079440ce29bfb6aa4a86273a4c387f3fa08239ef951d47c19fa070" Sep 30 17:09:25 crc kubenswrapper[4818]: I0930 17:09:25.299111 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm" Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.863884 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-ljmfd"] Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.864832 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-controller" containerID="cri-o://bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" gracePeriod=30 Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.864980 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-acl-logging" containerID="cri-o://95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" gracePeriod=30 Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.865010 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="northd" containerID="cri-o://db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" gracePeriod=30 Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.865046 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kube-rbac-proxy-node" containerID="cri-o://423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" gracePeriod=30 Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.865150 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" 
containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" gracePeriod=30 Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.865227 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="sbdb" containerID="cri-o://d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" gracePeriod=30 Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.865268 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="nbdb" containerID="cri-o://1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" gracePeriod=30 Sep 30 17:09:28 crc kubenswrapper[4818]: I0930 17:09:28.928221 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" containerID="cri-o://98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" gracePeriod=30 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.215650 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/3.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.218607 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovn-acl-logging/0.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.219216 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovn-controller/0.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.219759 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.284761 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-n7vzp"] Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285100 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285123 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285136 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-acl-logging" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285147 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-acl-logging" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285166 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9088c828-53ab-421b-8509-c350596da888" containerName="pull" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285175 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9088c828-53ab-421b-8509-c350596da888" containerName="pull" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285185 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285193 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285232 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kube-rbac-proxy-ovn-metrics" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285242 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kube-rbac-proxy-ovn-metrics" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285259 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="sbdb" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285268 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="sbdb" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285279 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="nbdb" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285288 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="nbdb" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285299 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285307 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285318 4818 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kubecfg-setup" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285326 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kubecfg-setup" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285339 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kube-rbac-proxy-node" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285348 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kube-rbac-proxy-node" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285360 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285371 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285380 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="northd" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285388 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="northd" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285398 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9088c828-53ab-421b-8509-c350596da888" containerName="util" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285408 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9088c828-53ab-421b-8509-c350596da888" containerName="util" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285435 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9088c828-53ab-421b-8509-c350596da888" containerName="extract" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285443 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9088c828-53ab-421b-8509-c350596da888" containerName="extract" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285456 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285465 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285581 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285608 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285618 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kube-rbac-proxy-ovn-metrics" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285629 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285637 4818 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="northd" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285651 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="kube-rbac-proxy-node" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285662 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="sbdb" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285672 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285684 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovn-acl-logging" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285695 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9088c828-53ab-421b-8509-c350596da888" containerName="extract" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285713 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="nbdb" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.285835 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.285846 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.287162 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.287187 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerName="ovnkube-controller" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.292400 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.326386 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovnkube-controller/3.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.334407 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovn-acl-logging/0.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.335305 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-ljmfd_68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/ovn-controller/0.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336012 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" exitCode=0 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336087 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336670 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336719 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336749 4818 scope.go:117] "RemoveContainer" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336684 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" exitCode=0 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336814 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" exitCode=0 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336832 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" exitCode=0 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336850 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" exitCode=0 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336868 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" exitCode=0 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336882 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" exitCode=143 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336889 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336944 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336967 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336985 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" 
event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337000 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337015 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337025 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337034 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337043 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337052 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337061 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337071 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337079 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337108 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337124 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337135 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337145 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337154 4818 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337165 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337197 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337207 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337216 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337225 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337234 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337247 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337262 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337272 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337281 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.336896 4818 generic.go:334] "Generic (PLEG): container finished" podID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" containerID="bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" exitCode=143 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337289 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337403 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337429 4818 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337442 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337454 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337465 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337475 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337501 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ljmfd" event={"ID":"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6","Type":"ContainerDied","Data":"e9300817e2b47f72209ac87ee8ed546e5d7f71915f34a2e48144bd2dc6ca4bb7"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337536 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337547 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337556 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337566 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337576 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337584 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337594 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337603 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337612 4818 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.337622 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.339561 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/2.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.340439 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/1.log" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.340486 4818 generic.go:334] "Generic (PLEG): container finished" podID="d36fce8a-ff27-48bf-be9c-67fc2046136d" containerID="16f00b29df5ca66c8eb980b856c8659e891bb2ee5eec8c4baf8196533a20321b" exitCode=2 Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.340515 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerDied","Data":"16f00b29df5ca66c8eb980b856c8659e891bb2ee5eec8c4baf8196533a20321b"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.340535 4818 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35"} Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.341085 4818 scope.go:117] "RemoveContainer" containerID="16f00b29df5ca66c8eb980b856c8659e891bb2ee5eec8c4baf8196533a20321b" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.341388 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-hq6j2_openshift-multus(d36fce8a-ff27-48bf-be9c-67fc2046136d)\"" pod="openshift-multus/multus-hq6j2" podUID="d36fce8a-ff27-48bf-be9c-67fc2046136d" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.362993 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.381505 4818 scope.go:117] "RemoveContainer" containerID="d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.397133 4818 scope.go:117] "RemoveContainer" containerID="1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399140 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxqgg\" (UniqueName: \"kubernetes.io/projected/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-kube-api-access-vxqgg\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399169 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-ovn\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 
17:09:29.399192 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-env-overrides\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399213 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-script-lib\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399228 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-systemd-units\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399251 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-config\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399271 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-systemd\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399290 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-kubelet\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399284 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399305 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-bin\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399335 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399349 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399393 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-netd\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399426 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399447 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-openvswitch\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399472 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-log-socket\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399492 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-ovn-kubernetes\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399514 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-netns\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399538 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-var-lib-openvswitch\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399556 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-etc-openvswitch\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399592 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"node-log\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-node-log\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399620 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovn-node-metrics-cert\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399660 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-slash\") pod \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\" (UID: \"68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6\") " Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399752 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399796 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399817 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-kubelet\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399851 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-run-netns\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399875 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-slash\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399940 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-node-log\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399968 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-cni-bin\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400006 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovnkube-config\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400048 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-env-overrides\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400072 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovn-node-metrics-cert\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400100 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trfzt\" (UniqueName: \"kubernetes.io/projected/a8c48898-be34-41fd-81c8-ad4745c3c0fb-kube-api-access-trfzt\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399823 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399841 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399856 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399872 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400151 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-slash" (OuterVolumeSpecName: "host-slash") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.399892 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400019 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400066 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-log-socket" (OuterVolumeSpecName: "log-socket") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400070 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400090 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400086 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400104 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-node-log" (OuterVolumeSpecName: "node-log") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400293 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-systemd-units\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400322 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovnkube-script-lib\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400338 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-ovn\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400357 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-var-lib-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400568 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-etc-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400583 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400616 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" 
(UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-cni-netd\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400651 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-run-ovn-kubernetes\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400686 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400714 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-log-socket\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400732 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-systemd\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400783 4818 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-env-overrides\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400792 4818 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400801 4818 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-systemd-units\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400810 4818 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovnkube-config\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400819 4818 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-kubelet\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400830 4818 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-bin\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400838 
4818 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-cni-netd\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400846 4818 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400854 4818 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-openvswitch\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400862 4818 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-log-socket\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400869 4818 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400877 4818 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-run-netns\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400885 4818 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400893 4818 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400902 4818 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-node-log\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400910 4818 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-host-slash\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.400918 4818 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-ovn\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.413902 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-kube-api-access-vxqgg" (OuterVolumeSpecName: "kube-api-access-vxqgg") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "kube-api-access-vxqgg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.415109 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.417429 4818 scope.go:117] "RemoveContainer" containerID="db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.419374 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" (UID: "68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.433612 4818 scope.go:117] "RemoveContainer" containerID="1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.451810 4818 scope.go:117] "RemoveContainer" containerID="423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.465374 4818 scope.go:117] "RemoveContainer" containerID="95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.483494 4818 scope.go:117] "RemoveContainer" containerID="bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502072 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovnkube-script-lib\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502114 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-ovn\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502136 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-var-lib-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502155 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-etc-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502171 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502191 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-cni-netd\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502209 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-run-ovn-kubernetes\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502226 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502245 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-log-socket\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502263 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-systemd\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502252 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-ovn\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502285 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-kubelet\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502296 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-etc-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502329 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-run-ovn-kubernetes\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502340 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-systemd\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502305 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-run-netns\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502361 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-kubelet\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502318 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502280 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-cni-netd\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502342 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-run-netns\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502370 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-log-socket\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502441 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-run-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502726 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-slash\") pod \"ovnkube-node-n7vzp\" (UID: 
\"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502731 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-var-lib-openvswitch\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502758 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-slash\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502881 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-node-log\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502940 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-cni-bin\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502960 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-node-log\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.502971 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovnkube-script-lib\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503000 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-host-cni-bin\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503039 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovnkube-config\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503094 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-env-overrides\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503117 4818 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovn-node-metrics-cert\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503165 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trfzt\" (UniqueName: \"kubernetes.io/projected/a8c48898-be34-41fd-81c8-ad4745c3c0fb-kube-api-access-trfzt\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503268 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-systemd-units\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503366 4818 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503385 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxqgg\" (UniqueName: \"kubernetes.io/projected/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-kube-api-access-vxqgg\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503398 4818 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6-run-systemd\") on node \"crc\" DevicePath \"\"" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503407 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-env-overrides\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503435 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a8c48898-be34-41fd-81c8-ad4745c3c0fb-systemd-units\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.503781 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovnkube-config\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.508392 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8c48898-be34-41fd-81c8-ad4745c3c0fb-ovn-node-metrics-cert\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.513853 4818 scope.go:117] "RemoveContainer" 
containerID="177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.557570 4818 scope.go:117] "RemoveContainer" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.558112 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": container with ID starting with 98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a not found: ID does not exist" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.558164 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} err="failed to get container status \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": rpc error: code = NotFound desc = could not find container \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": container with ID starting with 98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.558189 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.558614 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": container with ID starting with fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7 not found: ID does not exist" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.558650 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} err="failed to get container status \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": rpc error: code = NotFound desc = could not find container \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": container with ID starting with fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.558669 4818 scope.go:117] "RemoveContainer" containerID="d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.559182 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": container with ID starting with d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0 not found: ID does not exist" containerID="d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.559208 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} err="failed to get container status \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": rpc error: code = 
NotFound desc = could not find container \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": container with ID starting with d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.559236 4818 scope.go:117] "RemoveContainer" containerID="1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.559541 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": container with ID starting with 1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa not found: ID does not exist" containerID="1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.559561 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} err="failed to get container status \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": rpc error: code = NotFound desc = could not find container \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": container with ID starting with 1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.559577 4818 scope.go:117] "RemoveContainer" containerID="db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.560247 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": container with ID starting with db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931 not found: ID does not exist" containerID="db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.560299 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} err="failed to get container status \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": rpc error: code = NotFound desc = could not find container \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": container with ID starting with db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.560340 4818 scope.go:117] "RemoveContainer" containerID="1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.560711 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": container with ID starting with 1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063 not found: ID does not exist" containerID="1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.560748 4818 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} err="failed to get container status \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": rpc error: code = NotFound desc = could not find container \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": container with ID starting with 1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.560771 4818 scope.go:117] "RemoveContainer" containerID="423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.561218 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": container with ID starting with 423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70 not found: ID does not exist" containerID="423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.561246 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} err="failed to get container status \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": rpc error: code = NotFound desc = could not find container \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": container with ID starting with 423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.561261 4818 scope.go:117] "RemoveContainer" containerID="95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.561569 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": container with ID starting with 95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193 not found: ID does not exist" containerID="95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.561600 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} err="failed to get container status \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": rpc error: code = NotFound desc = could not find container \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": container with ID starting with 95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.561613 4818 scope.go:117] "RemoveContainer" containerID="bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.561944 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": container with ID starting with bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445 not found: ID does not exist" 
containerID="bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.561976 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} err="failed to get container status \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": rpc error: code = NotFound desc = could not find container \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": container with ID starting with bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.561992 4818 scope.go:117] "RemoveContainer" containerID="177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e" Sep 30 17:09:29 crc kubenswrapper[4818]: E0930 17:09:29.570465 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": container with ID starting with 177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e not found: ID does not exist" containerID="177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.570509 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} err="failed to get container status \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": rpc error: code = NotFound desc = could not find container \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": container with ID starting with 177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.570543 4818 scope.go:117] "RemoveContainer" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.571194 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} err="failed to get container status \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": rpc error: code = NotFound desc = could not find container \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": container with ID starting with 98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.571261 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.571636 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} err="failed to get container status \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": rpc error: code = NotFound desc = could not find container \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": container with ID starting with fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.571670 4818 scope.go:117] "RemoveContainer" 
containerID="d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.572251 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} err="failed to get container status \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": rpc error: code = NotFound desc = could not find container \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": container with ID starting with d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.572277 4818 scope.go:117] "RemoveContainer" containerID="1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.572540 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} err="failed to get container status \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": rpc error: code = NotFound desc = could not find container \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": container with ID starting with 1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.572563 4818 scope.go:117] "RemoveContainer" containerID="db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.572830 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} err="failed to get container status \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": rpc error: code = NotFound desc = could not find container \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": container with ID starting with db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.572863 4818 scope.go:117] "RemoveContainer" containerID="1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.573201 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} err="failed to get container status \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": rpc error: code = NotFound desc = could not find container \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": container with ID starting with 1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.573225 4818 scope.go:117] "RemoveContainer" containerID="423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.573535 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} err="failed to get container status \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": rpc error: code = NotFound desc = could not find 
container \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": container with ID starting with 423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.573557 4818 scope.go:117] "RemoveContainer" containerID="95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.573877 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} err="failed to get container status \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": rpc error: code = NotFound desc = could not find container \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": container with ID starting with 95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.573900 4818 scope.go:117] "RemoveContainer" containerID="bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574157 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} err="failed to get container status \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": rpc error: code = NotFound desc = could not find container \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": container with ID starting with bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574177 4818 scope.go:117] "RemoveContainer" containerID="177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574393 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} err="failed to get container status \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": rpc error: code = NotFound desc = could not find container \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": container with ID starting with 177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574415 4818 scope.go:117] "RemoveContainer" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574676 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} err="failed to get container status \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": rpc error: code = NotFound desc = could not find container \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": container with ID starting with 98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574694 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574897 4818 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} err="failed to get container status \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": rpc error: code = NotFound desc = could not find container \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": container with ID starting with fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.574936 4818 scope.go:117] "RemoveContainer" containerID="d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.575254 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} err="failed to get container status \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": rpc error: code = NotFound desc = could not find container \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": container with ID starting with d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.575281 4818 scope.go:117] "RemoveContainer" containerID="1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.575589 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} err="failed to get container status \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": rpc error: code = NotFound desc = could not find container \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": container with ID starting with 1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.575634 4818 scope.go:117] "RemoveContainer" containerID="db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.575962 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} err="failed to get container status \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": rpc error: code = NotFound desc = could not find container \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": container with ID starting with db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.575987 4818 scope.go:117] "RemoveContainer" containerID="1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.576249 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} err="failed to get container status \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": rpc error: code = NotFound desc = could not find container \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": container with ID starting with 
1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.576269 4818 scope.go:117] "RemoveContainer" containerID="423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.576547 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} err="failed to get container status \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": rpc error: code = NotFound desc = could not find container \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": container with ID starting with 423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.576568 4818 scope.go:117] "RemoveContainer" containerID="95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.576782 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} err="failed to get container status \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": rpc error: code = NotFound desc = could not find container \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": container with ID starting with 95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.576801 4818 scope.go:117] "RemoveContainer" containerID="bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577058 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} err="failed to get container status \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": rpc error: code = NotFound desc = could not find container \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": container with ID starting with bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577077 4818 scope.go:117] "RemoveContainer" containerID="177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577352 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} err="failed to get container status \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": rpc error: code = NotFound desc = could not find container \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": container with ID starting with 177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577383 4818 scope.go:117] "RemoveContainer" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577669 4818 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} err="failed to get container status \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": rpc error: code = NotFound desc = could not find container \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": container with ID starting with 98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577697 4818 scope.go:117] "RemoveContainer" containerID="fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577961 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7"} err="failed to get container status \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": rpc error: code = NotFound desc = could not find container \"fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7\": container with ID starting with fcf839c771aec3b2e17cede3154a515aea893a5005753fbf5a6ac1bc1c0ae0f7 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.577984 4818 scope.go:117] "RemoveContainer" containerID="d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.578433 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0"} err="failed to get container status \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": rpc error: code = NotFound desc = could not find container \"d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0\": container with ID starting with d65eae8668f62bc3f87e917ab96370f1b4ce9a807b4ba883c48f66f76cc053e0 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.578463 4818 scope.go:117] "RemoveContainer" containerID="1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.578755 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa"} err="failed to get container status \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": rpc error: code = NotFound desc = could not find container \"1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa\": container with ID starting with 1dc36c13b6f6ed9db1873800142ee7c5adee3073eb88bc63263b4357a4892dfa not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.578810 4818 scope.go:117] "RemoveContainer" containerID="db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.580153 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931"} err="failed to get container status \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": rpc error: code = NotFound desc = could not find container \"db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931\": container with ID starting with db69d260560c4edf49f16c690f10018d6a6f9a7d3f37d23f0a7ee97034242931 not found: ID does not exist" Sep 
30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.580215 4818 scope.go:117] "RemoveContainer" containerID="1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.580568 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063"} err="failed to get container status \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": rpc error: code = NotFound desc = could not find container \"1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063\": container with ID starting with 1d2326ca6510d8b7121bb3fe4880931d82d10d0b9e11dbcffbd33fc8fb439063 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.580593 4818 scope.go:117] "RemoveContainer" containerID="423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.580904 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trfzt\" (UniqueName: \"kubernetes.io/projected/a8c48898-be34-41fd-81c8-ad4745c3c0fb-kube-api-access-trfzt\") pod \"ovnkube-node-n7vzp\" (UID: \"a8c48898-be34-41fd-81c8-ad4745c3c0fb\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.580913 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70"} err="failed to get container status \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": rpc error: code = NotFound desc = could not find container \"423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70\": container with ID starting with 423dc74bc0aad2c2402705fb8697bfc276438bd38bd2ee0b13cda0786e6e4f70 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.581016 4818 scope.go:117] "RemoveContainer" containerID="95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.581358 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193"} err="failed to get container status \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": rpc error: code = NotFound desc = could not find container \"95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193\": container with ID starting with 95245482edc45018f5f890cba146aa3c3cf169530b0709c62ad7496112222193 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.581384 4818 scope.go:117] "RemoveContainer" containerID="bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.584556 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445"} err="failed to get container status \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": rpc error: code = NotFound desc = could not find container \"bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445\": container with ID starting with bbdbbc071eaec6e6fab62b52bb26ec1b59e73c6af27122b22b4e60e9939e1445 not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.584617 4818 
scope.go:117] "RemoveContainer" containerID="177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.585086 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e"} err="failed to get container status \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": rpc error: code = NotFound desc = could not find container \"177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e\": container with ID starting with 177c1d1ca8cb2b3048c61b5e8a51db8fde100fe0fb1d803c99ffdac2b5921c8e not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.585142 4818 scope.go:117] "RemoveContainer" containerID="98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.585602 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a"} err="failed to get container status \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": rpc error: code = NotFound desc = could not find container \"98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a\": container with ID starting with 98d7791d044b6dc7ca71eeac9262feaa7cd663ec7f04a676411efc6b7d3f2f9a not found: ID does not exist" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.607366 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.697722 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-ljmfd"] Sep 30 17:09:29 crc kubenswrapper[4818]: I0930 17:09:29.706898 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-ljmfd"] Sep 30 17:09:30 crc kubenswrapper[4818]: I0930 17:09:30.026417 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6" path="/var/lib/kubelet/pods/68c5c05a-d7b3-4e6d-b2a8-166bb36df9e6/volumes" Sep 30 17:09:30 crc kubenswrapper[4818]: I0930 17:09:30.346649 4818 generic.go:334] "Generic (PLEG): container finished" podID="a8c48898-be34-41fd-81c8-ad4745c3c0fb" containerID="a9043f9de1a74f7269d13be247583ec104aea7e97f2809307130bbabc7918e81" exitCode=0 Sep 30 17:09:30 crc kubenswrapper[4818]: I0930 17:09:30.346701 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerDied","Data":"a9043f9de1a74f7269d13be247583ec104aea7e97f2809307130bbabc7918e81"} Sep 30 17:09:30 crc kubenswrapper[4818]: I0930 17:09:30.346729 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"46d3dbbc205151df5dc32151b52be534731d6726f3f82039d9ed2dcc2d866fc7"} Sep 30 17:09:31 crc kubenswrapper[4818]: I0930 17:09:31.355140 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"381962cb4873368cc2411a95fc4a8b3f32718c0e3e919840817570d054f351ee"} Sep 30 17:09:31 crc kubenswrapper[4818]: I0930 17:09:31.355418 4818 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"811cfc1a2695dc4ed02fb6a0cb8c3b8b4da607fc7134b13b0cd868c8cb5b8043"} Sep 30 17:09:31 crc kubenswrapper[4818]: I0930 17:09:31.355429 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"6fead1cb1e346a5bea17a0cf748fce586d432515c4a4b68376139bc6b560f88c"} Sep 30 17:09:31 crc kubenswrapper[4818]: I0930 17:09:31.355440 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"41c349963a060cb95a3e82cbf76bc4797fbc61dc3d3f1feb562fe6410a32b886"} Sep 30 17:09:31 crc kubenswrapper[4818]: I0930 17:09:31.355448 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"bab55fd78f35cfa52b62957545cbee710e929765be09ccd4e0902eb2819642b0"} Sep 30 17:09:31 crc kubenswrapper[4818]: I0930 17:09:31.355457 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"81290af3253a666ebf268cf0afccc45b41fa29c7b3027841b01b2529e68c8a3a"} Sep 30 17:09:33 crc kubenswrapper[4818]: I0930 17:09:33.373387 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"e87936becac8941d95bf85aa6d9cd05bf0bb3f4c7d793de9500dc56edc48c807"} Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.127232 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d"] Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.128296 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.129971 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-7w6fv" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.130252 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.130707 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.179624 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2grn6\" (UniqueName: \"kubernetes.io/projected/96170b1f-1f7b-45df-a1e4-5d9901097907-kube-api-access-2grn6\") pod \"obo-prometheus-operator-7c8cf85677-bzv8d\" (UID: \"96170b1f-1f7b-45df-a1e4-5d9901097907\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.248436 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd"] Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.249482 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.251411 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-78d7g" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.252155 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.263999 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg"] Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.264990 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.280635 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/118fa0c6-c8bf-4ae6-9867-9aaf6ee11824-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg\" (UID: \"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.280683 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/73811ba7-972e-40ea-a82d-40a5e56341c4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd\" (UID: \"73811ba7-972e-40ea-a82d-40a5e56341c4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.280702 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/118fa0c6-c8bf-4ae6-9867-9aaf6ee11824-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg\" (UID: \"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.280729 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/73811ba7-972e-40ea-a82d-40a5e56341c4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd\" (UID: \"73811ba7-972e-40ea-a82d-40a5e56341c4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.280871 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2grn6\" (UniqueName: \"kubernetes.io/projected/96170b1f-1f7b-45df-a1e4-5d9901097907-kube-api-access-2grn6\") pod \"obo-prometheus-operator-7c8cf85677-bzv8d\" (UID: \"96170b1f-1f7b-45df-a1e4-5d9901097907\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.304532 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2grn6\" (UniqueName: \"kubernetes.io/projected/96170b1f-1f7b-45df-a1e4-5d9901097907-kube-api-access-2grn6\") 
pod \"obo-prometheus-operator-7c8cf85677-bzv8d\" (UID: \"96170b1f-1f7b-45df-a1e4-5d9901097907\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.351111 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-8zx45"] Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.352137 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.355305 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.355875 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-s7gm8" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.381875 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/73811ba7-972e-40ea-a82d-40a5e56341c4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd\" (UID: \"73811ba7-972e-40ea-a82d-40a5e56341c4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.381954 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/de961e2b-b16d-4db1-b908-5be30a74be3d-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-8zx45\" (UID: \"de961e2b-b16d-4db1-b908-5be30a74be3d\") " pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.382011 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/118fa0c6-c8bf-4ae6-9867-9aaf6ee11824-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg\" (UID: \"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.382044 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/73811ba7-972e-40ea-a82d-40a5e56341c4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd\" (UID: \"73811ba7-972e-40ea-a82d-40a5e56341c4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.382065 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/118fa0c6-c8bf-4ae6-9867-9aaf6ee11824-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg\" (UID: \"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.382102 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftv4t\" (UniqueName: \"kubernetes.io/projected/de961e2b-b16d-4db1-b908-5be30a74be3d-kube-api-access-ftv4t\") pod \"observability-operator-cc5f78dfc-8zx45\" (UID: 
\"de961e2b-b16d-4db1-b908-5be30a74be3d\") " pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.386134 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/73811ba7-972e-40ea-a82d-40a5e56341c4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd\" (UID: \"73811ba7-972e-40ea-a82d-40a5e56341c4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.386147 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/73811ba7-972e-40ea-a82d-40a5e56341c4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd\" (UID: \"73811ba7-972e-40ea-a82d-40a5e56341c4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.386481 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/118fa0c6-c8bf-4ae6-9867-9aaf6ee11824-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg\" (UID: \"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.387297 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/118fa0c6-c8bf-4ae6-9867-9aaf6ee11824-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg\" (UID: \"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.446582 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.446592 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-qnpbg"] Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.447758 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.451668 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-fcp76" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.473251 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(2f14a66947c2e8924cf8e487a1814bd522015889cb1e0fdc5671f3fdf53b135f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.473333 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(2f14a66947c2e8924cf8e487a1814bd522015889cb1e0fdc5671f3fdf53b135f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.473362 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(2f14a66947c2e8924cf8e487a1814bd522015889cb1e0fdc5671f3fdf53b135f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.473421 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators(96170b1f-1f7b-45df-a1e4-5d9901097907)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators(96170b1f-1f7b-45df-a1e4-5d9901097907)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(2f14a66947c2e8924cf8e487a1814bd522015889cb1e0fdc5671f3fdf53b135f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" podUID="96170b1f-1f7b-45df-a1e4-5d9901097907"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.482753 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftv4t\" (UniqueName: \"kubernetes.io/projected/de961e2b-b16d-4db1-b908-5be30a74be3d-kube-api-access-ftv4t\") pod \"observability-operator-cc5f78dfc-8zx45\" (UID: \"de961e2b-b16d-4db1-b908-5be30a74be3d\") " pod="openshift-operators/observability-operator-cc5f78dfc-8zx45"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.483116 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/de961e2b-b16d-4db1-b908-5be30a74be3d-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-8zx45\" (UID: \"de961e2b-b16d-4db1-b908-5be30a74be3d\") " pod="openshift-operators/observability-operator-cc5f78dfc-8zx45"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.483148 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wlkd\" (UniqueName: \"kubernetes.io/projected/88a12b8d-6531-406b-becd-d70ba32fa6c1-kube-api-access-9wlkd\") pod \"perses-operator-54bc95c9fb-qnpbg\" (UID: \"88a12b8d-6531-406b-becd-d70ba32fa6c1\") " pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.483904 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/88a12b8d-6531-406b-becd-d70ba32fa6c1-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-qnpbg\" (UID: \"88a12b8d-6531-406b-becd-d70ba32fa6c1\") " pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.487963 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/de961e2b-b16d-4db1-b908-5be30a74be3d-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-8zx45\" (UID: \"de961e2b-b16d-4db1-b908-5be30a74be3d\") " pod="openshift-operators/observability-operator-cc5f78dfc-8zx45"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.503611 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftv4t\" (UniqueName: \"kubernetes.io/projected/de961e2b-b16d-4db1-b908-5be30a74be3d-kube-api-access-ftv4t\") pod \"observability-operator-cc5f78dfc-8zx45\" (UID: \"de961e2b-b16d-4db1-b908-5be30a74be3d\") " pod="openshift-operators/observability-operator-cc5f78dfc-8zx45"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.565566 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.582119 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.583592 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(db7d9834e809df4c07e859ccf7127685a7647e8135b7b00a6e7b47d36fdcc195): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.583633 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(db7d9834e809df4c07e859ccf7127685a7647e8135b7b00a6e7b47d36fdcc195): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.583679 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(db7d9834e809df4c07e859ccf7127685a7647e8135b7b00a6e7b47d36fdcc195): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.583724 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(db7d9834e809df4c07e859ccf7127685a7647e8135b7b00a6e7b47d36fdcc195): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" podUID="73811ba7-972e-40ea-a82d-40a5e56341c4"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.585210 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/88a12b8d-6531-406b-becd-d70ba32fa6c1-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-qnpbg\" (UID: \"88a12b8d-6531-406b-becd-d70ba32fa6c1\") " pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.585304 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wlkd\" (UniqueName: \"kubernetes.io/projected/88a12b8d-6531-406b-becd-d70ba32fa6c1-kube-api-access-9wlkd\") pod \"perses-operator-54bc95c9fb-qnpbg\" (UID: \"88a12b8d-6531-406b-becd-d70ba32fa6c1\") " pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg"
Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.586231 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/88a12b8d-6531-406b-becd-d70ba32fa6c1-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-qnpbg\" (UID: \"88a12b8d-6531-406b-becd-d70ba32fa6c1\") " pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.600418 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(f11325576921d6a1b05aa48af2c34070dbb343014690c0daa77f035413096ecf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.600570 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(f11325576921d6a1b05aa48af2c34070dbb343014690c0daa77f035413096ecf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg"
Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.600641 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(f11325576921d6a1b05aa48af2c34070dbb343014690c0daa77f035413096ecf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg"
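Interleaved with the sandbox failures, each volume above walks through the same three stages: the reconciler first verifies the controller-side attach (reconciler_common.go:245), then starts the mount (reconciler_common.go:218), and the operation executor finally reports MountVolume.SetUp succeeded (operation_generator.go:637). A small Go sketch of that progression; the volumePhase type and its values are illustrative names, not kubelet's actual types:

    package main

    import "fmt"

    // volumePhase models (illustratively) the three stages each volume
    // passes through in the entries above.
    type volumePhase int

    const (
        verifyAttached volumePhase = iota // reconciler_common.go:245
        mountStarted                      // reconciler_common.go:218
        setUpSucceeded                    // operation_generator.go:637
    )

    func (p volumePhase) String() string {
        return [...]string{
            "operationExecutor.VerifyControllerAttachedVolume started",
            "operationExecutor.MountVolume started",
            "MountVolume.SetUp succeeded",
        }[p]
    }

    func main() {
        // e.g. the webhook-cert volume of the vpdnd pod above passes through all three.
        for p := verifyAttached; p <= setUpSucceeded; p++ {
            fmt.Printf("%v for volume \"webhook-cert\"\n", p)
        }
    }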
pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.600740 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators(118fa0c6-c8bf-4ae6-9867-9aaf6ee11824)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators(118fa0c6-c8bf-4ae6-9867-9aaf6ee11824)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(f11325576921d6a1b05aa48af2c34070dbb343014690c0daa77f035413096ecf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" podUID="118fa0c6-c8bf-4ae6-9867-9aaf6ee11824" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.602657 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wlkd\" (UniqueName: \"kubernetes.io/projected/88a12b8d-6531-406b-becd-d70ba32fa6c1-kube-api-access-9wlkd\") pod \"perses-operator-54bc95c9fb-qnpbg\" (UID: \"88a12b8d-6531-406b-becd-d70ba32fa6c1\") " pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.684265 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.701196 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(08178579de050954affcf2d176b5fa8341fec8d38163760748856f6a0b770857): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.701253 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(08178579de050954affcf2d176b5fa8341fec8d38163760748856f6a0b770857): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.701275 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(08178579de050954affcf2d176b5fa8341fec8d38163760748856f6a0b770857): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.701327 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-cc5f78dfc-8zx45_openshift-operators(de961e2b-b16d-4db1-b908-5be30a74be3d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-cc5f78dfc-8zx45_openshift-operators(de961e2b-b16d-4db1-b908-5be30a74be3d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(08178579de050954affcf2d176b5fa8341fec8d38163760748856f6a0b770857): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" podUID="de961e2b-b16d-4db1-b908-5be30a74be3d" Sep 30 17:09:35 crc kubenswrapper[4818]: I0930 17:09:35.780476 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.804184 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f7e49dc961533e55fb013b916e3f78adaeebd995e31a53eabd21501284a9585c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.804275 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f7e49dc961533e55fb013b916e3f78adaeebd995e31a53eabd21501284a9585c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.804326 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f7e49dc961533e55fb013b916e3f78adaeebd995e31a53eabd21501284a9585c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:35 crc kubenswrapper[4818]: E0930 17:09:35.804408 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-54bc95c9fb-qnpbg_openshift-operators(88a12b8d-6531-406b-becd-d70ba32fa6c1)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-54bc95c9fb-qnpbg_openshift-operators(88a12b8d-6531-406b-becd-d70ba32fa6c1)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f7e49dc961533e55fb013b916e3f78adaeebd995e31a53eabd21501284a9585c): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" podUID="88a12b8d-6531-406b-becd-d70ba32fa6c1" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.389084 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" event={"ID":"a8c48898-be34-41fd-81c8-ad4745c3c0fb","Type":"ContainerStarted","Data":"ae73dd9e31c171f69f451a76bf215c044d0595b207fa37339e3872d10cfc1e17"} Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.389356 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.389387 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.421246 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.422813 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" podStartSLOduration=7.42279258 podStartE2EDuration="7.42279258s" podCreationTimestamp="2025-09-30 17:09:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:09:36.417391734 +0000 UTC m=+623.171663550" watchObservedRunningTime="2025-09-30 17:09:36.42279258 +0000 UTC m=+623.177064396" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.585048 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd"] Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.585139 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.585496 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.594297 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-qnpbg"] Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.594410 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.594841 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.606118 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg"] Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.606200 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.606530 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.618710 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(1fb93f14433a7d746f2cc1333fe59b5b87be26ad57db11b539db7cb814d19774): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.618763 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(1fb93f14433a7d746f2cc1333fe59b5b87be26ad57db11b539db7cb814d19774): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.618783 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(1fb93f14433a7d746f2cc1333fe59b5b87be26ad57db11b539db7cb814d19774): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.618818 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(1fb93f14433a7d746f2cc1333fe59b5b87be26ad57db11b539db7cb814d19774): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" podUID="73811ba7-972e-40ea-a82d-40a5e56341c4" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.649451 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f8371f4007152258b995797303b57131d02ef9730026954dcdf7c88c6119c77e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.649508 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f8371f4007152258b995797303b57131d02ef9730026954dcdf7c88c6119c77e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.649532 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f8371f4007152258b995797303b57131d02ef9730026954dcdf7c88c6119c77e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.649572 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-54bc95c9fb-qnpbg_openshift-operators(88a12b8d-6531-406b-becd-d70ba32fa6c1)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-54bc95c9fb-qnpbg_openshift-operators(88a12b8d-6531-406b-becd-d70ba32fa6c1)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(f8371f4007152258b995797303b57131d02ef9730026954dcdf7c88c6119c77e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" podUID="88a12b8d-6531-406b-becd-d70ba32fa6c1" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.652495 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d"] Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.652581 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.652956 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.669357 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(b504b77b3a21697fc776ded8d8736115a756d9bdd9eecd99e9cea8e70d530f98): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.669412 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(b504b77b3a21697fc776ded8d8736115a756d9bdd9eecd99e9cea8e70d530f98): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.669432 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(b504b77b3a21697fc776ded8d8736115a756d9bdd9eecd99e9cea8e70d530f98): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.669477 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators(118fa0c6-c8bf-4ae6-9867-9aaf6ee11824)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators(118fa0c6-c8bf-4ae6-9867-9aaf6ee11824)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(b504b77b3a21697fc776ded8d8736115a756d9bdd9eecd99e9cea8e70d530f98): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" podUID="118fa0c6-c8bf-4ae6-9867-9aaf6ee11824" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.682769 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-8zx45"] Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.682862 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:36 crc kubenswrapper[4818]: I0930 17:09:36.683248 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.688094 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(1500de1972f2d7522a9033d2bad1637dfb05fb45ac37d2eeebe4062f083614e1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.688157 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(1500de1972f2d7522a9033d2bad1637dfb05fb45ac37d2eeebe4062f083614e1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.688182 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(1500de1972f2d7522a9033d2bad1637dfb05fb45ac37d2eeebe4062f083614e1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.688229 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators(96170b1f-1f7b-45df-a1e4-5d9901097907)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators(96170b1f-1f7b-45df-a1e4-5d9901097907)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(1500de1972f2d7522a9033d2bad1637dfb05fb45ac37d2eeebe4062f083614e1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" podUID="96170b1f-1f7b-45df-a1e4-5d9901097907" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.715074 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(65cc67273411d33c41901bf6c3945aaf35de217664a4bfdab241f189c6475e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.715165 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(65cc67273411d33c41901bf6c3945aaf35de217664a4bfdab241f189c6475e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.715226 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(65cc67273411d33c41901bf6c3945aaf35de217664a4bfdab241f189c6475e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:36 crc kubenswrapper[4818]: E0930 17:09:36.715279 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-cc5f78dfc-8zx45_openshift-operators(de961e2b-b16d-4db1-b908-5be30a74be3d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-cc5f78dfc-8zx45_openshift-operators(de961e2b-b16d-4db1-b908-5be30a74be3d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(65cc67273411d33c41901bf6c3945aaf35de217664a4bfdab241f189c6475e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" podUID="de961e2b-b16d-4db1-b908-5be30a74be3d" Sep 30 17:09:37 crc kubenswrapper[4818]: I0930 17:09:37.396310 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:37 crc kubenswrapper[4818]: I0930 17:09:37.469990 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:09:44 crc kubenswrapper[4818]: I0930 17:09:44.023552 4818 scope.go:117] "RemoveContainer" containerID="16f00b29df5ca66c8eb980b856c8659e891bb2ee5eec8c4baf8196533a20321b" Sep 30 17:09:44 crc kubenswrapper[4818]: E0930 17:09:44.024329 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-hq6j2_openshift-multus(d36fce8a-ff27-48bf-be9c-67fc2046136d)\"" pod="openshift-multus/multus-hq6j2" podUID="d36fce8a-ff27-48bf-be9c-67fc2046136d" Sep 30 17:09:48 crc kubenswrapper[4818]: I0930 17:09:48.020270 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:48 crc kubenswrapper[4818]: I0930 17:09:48.020289 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:48 crc kubenswrapper[4818]: I0930 17:09:48.020861 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:48 crc kubenswrapper[4818]: I0930 17:09:48.021180 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.047513 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(86ffa710238f89b6639bd52e00457c32c8545cbcc03074075814ea4faa2a400b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.047636 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(86ffa710238f89b6639bd52e00457c32c8545cbcc03074075814ea4faa2a400b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.047715 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(86ffa710238f89b6639bd52e00457c32c8545cbcc03074075814ea4faa2a400b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.047819 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(86ffa710238f89b6639bd52e00457c32c8545cbcc03074075814ea4faa2a400b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" podUID="73811ba7-972e-40ea-a82d-40a5e56341c4" Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.051615 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(677f84daf36711612298491a2a118ad234428362cbfe2fc7b7b96c641bbbb742): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.051675 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(677f84daf36711612298491a2a118ad234428362cbfe2fc7b7b96c641bbbb742): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.051702 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(677f84daf36711612298491a2a118ad234428362cbfe2fc7b7b96c641bbbb742): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:09:48 crc kubenswrapper[4818]: E0930 17:09:48.051750 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-54bc95c9fb-qnpbg_openshift-operators(88a12b8d-6531-406b-becd-d70ba32fa6c1)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-54bc95c9fb-qnpbg_openshift-operators(88a12b8d-6531-406b-becd-d70ba32fa6c1)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-54bc95c9fb-qnpbg_openshift-operators_88a12b8d-6531-406b-becd-d70ba32fa6c1_0(677f84daf36711612298491a2a118ad234428362cbfe2fc7b7b96c641bbbb742): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" podUID="88a12b8d-6531-406b-becd-d70ba32fa6c1" Sep 30 17:09:49 crc kubenswrapper[4818]: I0930 17:09:49.019577 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:49 crc kubenswrapper[4818]: I0930 17:09:49.019753 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:49 crc kubenswrapper[4818]: I0930 17:09:49.020062 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:49 crc kubenswrapper[4818]: I0930 17:09:49.020310 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.046737 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(80798c571193496df27ae69ec31c1509f5b37aca70136a6f2e2f428e24d44737): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.047238 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(80798c571193496df27ae69ec31c1509f5b37aca70136a6f2e2f428e24d44737): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.047337 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(80798c571193496df27ae69ec31c1509f5b37aca70136a6f2e2f428e24d44737): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.047468 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators(118fa0c6-c8bf-4ae6-9867-9aaf6ee11824)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators(118fa0c6-c8bf-4ae6-9867-9aaf6ee11824)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_openshift-operators_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824_0(80798c571193496df27ae69ec31c1509f5b37aca70136a6f2e2f428e24d44737): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" podUID="118fa0c6-c8bf-4ae6-9867-9aaf6ee11824" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.059104 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(682d00aa153dc1336718f2dac718394c64456903c5713f6b935c07ef987f4478): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.059272 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(682d00aa153dc1336718f2dac718394c64456903c5713f6b935c07ef987f4478): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.059364 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(682d00aa153dc1336718f2dac718394c64456903c5713f6b935c07ef987f4478): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:09:49 crc kubenswrapper[4818]: E0930 17:09:49.059489 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-cc5f78dfc-8zx45_openshift-operators(de961e2b-b16d-4db1-b908-5be30a74be3d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-cc5f78dfc-8zx45_openshift-operators(de961e2b-b16d-4db1-b908-5be30a74be3d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-cc5f78dfc-8zx45_openshift-operators_de961e2b-b16d-4db1-b908-5be30a74be3d_0(682d00aa153dc1336718f2dac718394c64456903c5713f6b935c07ef987f4478): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" podUID="de961e2b-b16d-4db1-b908-5be30a74be3d" Sep 30 17:09:52 crc kubenswrapper[4818]: I0930 17:09:52.020156 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:52 crc kubenswrapper[4818]: I0930 17:09:52.020821 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:52 crc kubenswrapper[4818]: E0930 17:09:52.054043 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(009fdc35013475aa84f9ec5613fb18f1cab2b075b4d27c35605c9cc349549c0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:09:52 crc kubenswrapper[4818]: E0930 17:09:52.054174 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(009fdc35013475aa84f9ec5613fb18f1cab2b075b4d27c35605c9cc349549c0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:52 crc kubenswrapper[4818]: E0930 17:09:52.054251 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(009fdc35013475aa84f9ec5613fb18f1cab2b075b4d27c35605c9cc349549c0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:09:52 crc kubenswrapper[4818]: E0930 17:09:52.054341 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators(96170b1f-1f7b-45df-a1e4-5d9901097907)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators(96170b1f-1f7b-45df-a1e4-5d9901097907)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-7c8cf85677-bzv8d_openshift-operators_96170b1f-1f7b-45df-a1e4-5d9901097907_0(009fdc35013475aa84f9ec5613fb18f1cab2b075b4d27c35605c9cc349549c0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" podUID="96170b1f-1f7b-45df-a1e4-5d9901097907" Sep 30 17:09:59 crc kubenswrapper[4818]: I0930 17:09:59.021022 4818 scope.go:117] "RemoveContainer" containerID="16f00b29df5ca66c8eb980b856c8659e891bb2ee5eec8c4baf8196533a20321b" Sep 30 17:09:59 crc kubenswrapper[4818]: I0930 17:09:59.510323 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/2.log" Sep 30 17:09:59 crc kubenswrapper[4818]: I0930 17:09:59.511115 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/1.log" Sep 30 17:09:59 crc kubenswrapper[4818]: I0930 17:09:59.511169 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hq6j2" event={"ID":"d36fce8a-ff27-48bf-be9c-67fc2046136d","Type":"ContainerStarted","Data":"4919d087088f0a2dc3fd943db8873e26efbefa54ed795d2aa224af26c33e09af"} Sep 30 17:09:59 crc kubenswrapper[4818]: I0930 17:09:59.627713 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-n7vzp" Sep 30 17:10:00 crc kubenswrapper[4818]: I0930 17:10:00.020021 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:10:00 crc kubenswrapper[4818]: I0930 17:10:00.020716 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:10:00 crc kubenswrapper[4818]: E0930 17:10:00.043273 4818 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(03ca564eb920e3f581ac0a1420c19235a5b951df80b71f09998d372cd612096d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 30 17:10:00 crc kubenswrapper[4818]: E0930 17:10:00.043332 4818 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(03ca564eb920e3f581ac0a1420c19235a5b951df80b71f09998d372cd612096d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:10:00 crc kubenswrapper[4818]: E0930 17:10:00.043355 4818 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(03ca564eb920e3f581ac0a1420c19235a5b951df80b71f09998d372cd612096d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:10:00 crc kubenswrapper[4818]: E0930 17:10:00.043403 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators(73811ba7-972e-40ea-a82d-40a5e56341c4)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_openshift-operators_73811ba7-972e-40ea-a82d-40a5e56341c4_0(03ca564eb920e3f581ac0a1420c19235a5b951df80b71f09998d372cd612096d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" podUID="73811ba7-972e-40ea-a82d-40a5e56341c4" Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.020295 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.020370 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.021387 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.021422 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.275413 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-qnpbg"] Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.323390 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-8zx45"] Sep 30 17:10:01 crc kubenswrapper[4818]: W0930 17:10:01.325614 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde961e2b_b16d_4db1_b908_5be30a74be3d.slice/crio-c8e26d5741c42b64a75b879ca15906941c5803efdec566d718b851a932c586d2 WatchSource:0}: Error finding container c8e26d5741c42b64a75b879ca15906941c5803efdec566d718b851a932c586d2: Status 404 returned error can't find the container with id c8e26d5741c42b64a75b879ca15906941c5803efdec566d718b851a932c586d2 Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.527335 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" event={"ID":"de961e2b-b16d-4db1-b908-5be30a74be3d","Type":"ContainerStarted","Data":"c8e26d5741c42b64a75b879ca15906941c5803efdec566d718b851a932c586d2"} Sep 30 17:10:01 crc kubenswrapper[4818]: I0930 17:10:01.530004 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" event={"ID":"88a12b8d-6531-406b-becd-d70ba32fa6c1","Type":"ContainerStarted","Data":"aff011f5d665731f0b212cad34f245e1bc93092ae2b2e68ba06ba79fdca12329"} Sep 30 17:10:03 crc kubenswrapper[4818]: I0930 17:10:03.020068 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:10:03 crc kubenswrapper[4818]: I0930 17:10:03.020520 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" Sep 30 17:10:03 crc kubenswrapper[4818]: I0930 17:10:03.231708 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d"] Sep 30 17:10:03 crc kubenswrapper[4818]: I0930 17:10:03.542841 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" event={"ID":"96170b1f-1f7b-45df-a1e4-5d9901097907","Type":"ContainerStarted","Data":"0481c04b4b25d6df9432e4569af54b4b5e45511736f1eb4ae2b9bd27e1bb3c58"} Sep 30 17:10:04 crc kubenswrapper[4818]: I0930 17:10:04.019969 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:10:04 crc kubenswrapper[4818]: I0930 17:10:04.025692 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" Sep 30 17:10:04 crc kubenswrapper[4818]: I0930 17:10:04.461697 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg"] Sep 30 17:10:07 crc kubenswrapper[4818]: W0930 17:10:07.474802 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod118fa0c6_c8bf_4ae6_9867_9aaf6ee11824.slice/crio-cce527519e647bc89a69fa96c6af7532d345ac24c819ae6d629d88a51e384184 WatchSource:0}: Error finding container cce527519e647bc89a69fa96c6af7532d345ac24c819ae6d629d88a51e384184: Status 404 returned error can't find the container with id cce527519e647bc89a69fa96c6af7532d345ac24c819ae6d629d88a51e384184 Sep 30 17:10:07 crc kubenswrapper[4818]: I0930 17:10:07.567622 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" event={"ID":"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824","Type":"ContainerStarted","Data":"cce527519e647bc89a69fa96c6af7532d345ac24c819ae6d629d88a51e384184"} Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.596137 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" event={"ID":"96170b1f-1f7b-45df-a1e4-5d9901097907","Type":"ContainerStarted","Data":"83b163d295f338f61c1344e6c536788c66f613077a13d999dcd2451eb7eaf6fe"} Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.600254 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" event={"ID":"118fa0c6-c8bf-4ae6-9867-9aaf6ee11824","Type":"ContainerStarted","Data":"5e5b467fe29665ebcfe9a49620a232dcca15256ade864f1f9094c3fabe156ef9"} Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.603613 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" event={"ID":"de961e2b-b16d-4db1-b908-5be30a74be3d","Type":"ContainerStarted","Data":"7454d75116bdf6406a66247859cb8710ed7a821aa302c805898c667d047c710b"} Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.604341 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.605459 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" event={"ID":"88a12b8d-6531-406b-becd-d70ba32fa6c1","Type":"ContainerStarted","Data":"64faaa7006d4a667c300ad1aec1658017ccc9b54d43ad9edafcfedbbbc67fb06"} Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.606333 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.607454 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.645263 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" podStartSLOduration=27.527608335 podStartE2EDuration="36.645240112s" podCreationTimestamp="2025-09-30 17:09:35 +0000 UTC" firstStartedPulling="2025-09-30 17:10:01.293871403 +0000 UTC m=+648.048143219" lastFinishedPulling="2025-09-30 17:10:10.41150317 +0000 UTC m=+657.165774996" observedRunningTime="2025-09-30 17:10:11.642802926 +0000 UTC m=+658.397074782" watchObservedRunningTime="2025-09-30 17:10:11.645240112 +0000 UTC m=+658.399511958" Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.651208 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-bzv8d" podStartSLOduration=29.452718028 podStartE2EDuration="36.651192173s" podCreationTimestamp="2025-09-30 17:09:35 +0000 UTC" firstStartedPulling="2025-09-30 17:10:03.25890558 +0000 UTC m=+650.013177396" lastFinishedPulling="2025-09-30 17:10:10.457379715 +0000 UTC m=+657.211651541" observedRunningTime="2025-09-30 17:10:11.62455146 +0000 UTC m=+658.378823276" watchObservedRunningTime="2025-09-30 17:10:11.651192173 +0000 UTC m=+658.405464019" Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.681055 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg" podStartSLOduration=33.736793341 podStartE2EDuration="36.681025763s" podCreationTimestamp="2025-09-30 17:09:35 +0000 UTC" firstStartedPulling="2025-09-30 17:10:07.479679825 +0000 UTC m=+654.233951641" lastFinishedPulling="2025-09-30 17:10:10.423912227 +0000 UTC m=+657.178184063" observedRunningTime="2025-09-30 17:10:11.668152494 +0000 UTC m=+658.422424320" watchObservedRunningTime="2025-09-30 17:10:11.681025763 +0000 UTC m=+658.435297589" Sep 30 17:10:11 crc kubenswrapper[4818]: I0930 17:10:11.717471 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-cc5f78dfc-8zx45" podStartSLOduration=27.634257713 podStartE2EDuration="36.717452831s" podCreationTimestamp="2025-09-30 17:09:35 +0000 UTC" firstStartedPulling="2025-09-30 17:10:01.327953282 +0000 UTC m=+648.082225098" lastFinishedPulling="2025-09-30 17:10:10.41114838 +0000 UTC m=+657.165420216" observedRunningTime="2025-09-30 17:10:11.716484355 +0000 UTC m=+658.470756181" watchObservedRunningTime="2025-09-30 17:10:11.717452831 +0000 UTC m=+658.471724647" Sep 30 17:10:13 crc kubenswrapper[4818]: I0930 17:10:13.020337 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:10:13 crc kubenswrapper[4818]: I0930 17:10:13.020831 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" Sep 30 17:10:13 crc kubenswrapper[4818]: I0930 17:10:13.248826 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd"] Sep 30 17:10:13 crc kubenswrapper[4818]: W0930 17:10:13.259514 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73811ba7_972e_40ea_a82d_40a5e56341c4.slice/crio-b67dd919bc55813a87617f6d65fd3d5d3357438e3575cd8cec483f05b268a828 WatchSource:0}: Error finding container b67dd919bc55813a87617f6d65fd3d5d3357438e3575cd8cec483f05b268a828: Status 404 returned error can't find the container with id b67dd919bc55813a87617f6d65fd3d5d3357438e3575cd8cec483f05b268a828 Sep 30 17:10:13 crc kubenswrapper[4818]: I0930 17:10:13.617075 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" event={"ID":"73811ba7-972e-40ea-a82d-40a5e56341c4","Type":"ContainerStarted","Data":"9a75e95ebcf33b4568e549c714e6250794545ca1c27d14e53050224e176af99e"} Sep 30 17:10:13 crc kubenswrapper[4818]: I0930 17:10:13.617132 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" event={"ID":"73811ba7-972e-40ea-a82d-40a5e56341c4","Type":"ContainerStarted","Data":"b67dd919bc55813a87617f6d65fd3d5d3357438e3575cd8cec483f05b268a828"} Sep 30 17:10:13 crc kubenswrapper[4818]: I0930 17:10:13.644889 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd" podStartSLOduration=38.644876719 podStartE2EDuration="38.644876719s" podCreationTimestamp="2025-09-30 17:09:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:10:13.642203076 +0000 UTC m=+660.396474922" watchObservedRunningTime="2025-09-30 17:10:13.644876719 +0000 UTC m=+660.399148535" Sep 30 17:10:14 crc kubenswrapper[4818]: I0930 17:10:14.228767 4818 scope.go:117] "RemoveContainer" containerID="9b2cdabb8638db6c90e9b3623898192035c1688291bf0f7e0ffbd32f2cd12d35" Sep 30 17:10:14 crc kubenswrapper[4818]: I0930 17:10:14.626121 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hq6j2_d36fce8a-ff27-48bf-be9c-67fc2046136d/kube-multus/2.log" Sep 30 17:10:15 crc kubenswrapper[4818]: I0930 17:10:15.784185 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-54bc95c9fb-qnpbg" Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.313337 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"] Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.315372 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.317573 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.328532 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"]
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.490475 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-util\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.490563 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-bundle\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.490873 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqnmz\" (UniqueName: \"kubernetes.io/projected/f8d58532-c164-4d67-be63-2324034f1706-kube-api-access-wqnmz\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.592126 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-util\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.592210 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-bundle\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.592347 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqnmz\" (UniqueName: \"kubernetes.io/projected/f8d58532-c164-4d67-be63-2324034f1706-kube-api-access-wqnmz\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.592693 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-util\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.593037 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-bundle\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.627728 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqnmz\" (UniqueName: \"kubernetes.io/projected/f8d58532-c164-4d67-be63-2324034f1706-kube-api-access-wqnmz\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.631136 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:21 crc kubenswrapper[4818]: I0930 17:10:21.890894 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"]
Sep 30 17:10:21 crc kubenswrapper[4818]: W0930 17:10:21.902279 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8d58532_c164_4d67_be63_2324034f1706.slice/crio-473b70d1229fa73d0f4d4def12c379238db15f72ea07b9557f7d62417d9825e8 WatchSource:0}: Error finding container 473b70d1229fa73d0f4d4def12c379238db15f72ea07b9557f7d62417d9825e8: Status 404 returned error can't find the container with id 473b70d1229fa73d0f4d4def12c379238db15f72ea07b9557f7d62417d9825e8
Sep 30 17:10:22 crc kubenswrapper[4818]: I0930 17:10:22.680202 4818 generic.go:334] "Generic (PLEG): container finished" podID="f8d58532-c164-4d67-be63-2324034f1706" containerID="8234c60cfcf7f65f066bdba8e844e539ff560b5f87b29d0cad141df22fc2dbe8" exitCode=0
Sep 30 17:10:22 crc kubenswrapper[4818]: I0930 17:10:22.680243 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql" event={"ID":"f8d58532-c164-4d67-be63-2324034f1706","Type":"ContainerDied","Data":"8234c60cfcf7f65f066bdba8e844e539ff560b5f87b29d0cad141df22fc2dbe8"}
Sep 30 17:10:22 crc kubenswrapper[4818]: I0930 17:10:22.680266 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql" event={"ID":"f8d58532-c164-4d67-be63-2324034f1706","Type":"ContainerStarted","Data":"473b70d1229fa73d0f4d4def12c379238db15f72ea07b9557f7d62417d9825e8"}
Sep 30 17:10:24 crc kubenswrapper[4818]: I0930 17:10:24.713675 4818 generic.go:334] "Generic (PLEG): container finished" podID="f8d58532-c164-4d67-be63-2324034f1706" containerID="527ba89990b36079b50d4f2911b7a2b49d62dd677b699c2352ade8acc051020b" exitCode=0
Sep 30 17:10:24 crc kubenswrapper[4818]: I0930 17:10:24.714083 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql" event={"ID":"e15bd4a3-2980-4e48-b222-988af1b45bb4","Type":"ContainerDied","Data":"527ba89990b36079b50d4f2911b7a2b49d62dd677b699c2352ade8acc051020b"}
Sep 30 17:10:25 crc kubenswrapper[4818]: I0930 17:10:25.723795 4818 generic.go:334] "Generic (PLEG): container finished" podID="f8d58532-c164-4d67-be63-2324034f1706" containerID="04fa2db30e2fcb7b632c008b036c590794687708927711c509b70a2e867edf35" exitCode=0
Sep 30 17:10:25 crc kubenswrapper[4818]: I0930 17:10:25.723918 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql" event={"ID":"f8d58532-c164-4d67-be63-2324034f1706","Type":"ContainerDied","Data":"04fa2db30e2fcb7b632c008b036c590794687708927711c509b70a2e867edf35"}
Sep 30 17:10:26 crc kubenswrapper[4818]: I0930 17:10:26.957197 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.065269 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-util\") pod \"f8d58532-c164-4d67-be63-2324034f1706\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") "
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.065415 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-bundle\") pod \"f8d58532-c164-4d67-be63-2324034f1706\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") "
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.065465 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wqnmz\" (UniqueName: \"kubernetes.io/projected/f8d58532-c164-4d67-be63-2324034f1706-kube-api-access-wqnmz\") pod \"f8d58532-c164-4d67-be63-2324034f1706\" (UID: \"f8d58532-c164-4d67-be63-2324034f1706\") "
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.066052 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-bundle" (OuterVolumeSpecName: "bundle") pod "f8d58532-c164-4d67-be63-2324034f1706" (UID: "f8d58532-c164-4d67-be63-2324034f1706"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.071353 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8d58532-c164-4d67-be63-2324034f1706-kube-api-access-wqnmz" (OuterVolumeSpecName: "kube-api-access-wqnmz") pod "f8d58532-c164-4d67-be63-2324034f1706" (UID: "f8d58532-c164-4d67-be63-2324034f1706"). InnerVolumeSpecName "kube-api-access-wqnmz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.087891 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-util" (OuterVolumeSpecName: "util") pod "f8d58532-c164-4d67-be63-2324034f1706" (UID: "f8d58532-c164-4d67-be63-2324034f1706"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.167123 4818 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.167177 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wqnmz\" (UniqueName: \"kubernetes.io/projected/f8d58532-c164-4d67-be63-2324034f1706-kube-api-access-wqnmz\") on node \"crc\" DevicePath \"\""
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.167199 4818 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8d58532-c164-4d67-be63-2324034f1706-util\") on node \"crc\" DevicePath \"\""
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.741407 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql" event={"ID":"f8d58532-c164-4d67-be63-2324034f1706","Type":"ContainerDied","Data":"473b70d1229fa73d0f4d4def12c379238db15f72ea07b9557f7d62417d9825e8"}
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.741462 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="473b70d1229fa73d0f4d4def12c379238db15f72ea07b9557f7d62417d9825e8"
Sep 30 17:10:27 crc kubenswrapper[4818]: I0930 17:10:27.741489 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.933882 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"]
Sep 30 17:10:32 crc kubenswrapper[4818]: E0930 17:10:32.934694 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d58532-c164-4d67-be63-2324034f1706" containerName="pull"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.934708 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d58532-c164-4d67-be63-2324034f1706" containerName="pull"
Sep 30 17:10:32 crc kubenswrapper[4818]: E0930 17:10:32.934719 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d58532-c164-4d67-be63-2324034f1706" containerName="extract"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.934726 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d58532-c164-4d67-be63-2324034f1706" containerName="extract"
Sep 30 17:10:32 crc kubenswrapper[4818]: E0930 17:10:32.934739 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d58532-c164-4d67-be63-2324034f1706" containerName="util"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.934746 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d58532-c164-4d67-be63-2324034f1706" containerName="util"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.934858 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8d58532-c164-4d67-be63-2324034f1706" containerName="extract"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.936342 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.938591 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-j7dcn"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.938600 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.938945 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.953831 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk76c\" (UniqueName: \"kubernetes.io/projected/53a9a60d-25e2-4794-adb3-83cd8c2df8b5-kube-api-access-hk76c\") pod \"nmstate-operator-5d6f6cfd66-qkrhb\" (UID: \"53a9a60d-25e2-4794-adb3-83cd8c2df8b5\") " pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"
Sep 30 17:10:32 crc kubenswrapper[4818]: I0930 17:10:32.987200 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"]
Sep 30 17:10:33 crc kubenswrapper[4818]: I0930 17:10:33.054412 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk76c\" (UniqueName: \"kubernetes.io/projected/53a9a60d-25e2-4794-adb3-83cd8c2df8b5-kube-api-access-hk76c\") pod \"nmstate-operator-5d6f6cfd66-qkrhb\" (UID: \"53a9a60d-25e2-4794-adb3-83cd8c2df8b5\") " pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"
Sep 30 17:10:33 crc kubenswrapper[4818]: I0930 17:10:33.076098 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk76c\" (UniqueName: \"kubernetes.io/projected/53a9a60d-25e2-4794-adb3-83cd8c2df8b5-kube-api-access-hk76c\") pod \"nmstate-operator-5d6f6cfd66-qkrhb\" (UID: \"53a9a60d-25e2-4794-adb3-83cd8c2df8b5\") " pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"
Sep 30 17:10:33 crc kubenswrapper[4818]: I0930 17:10:33.259103 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"
Sep 30 17:10:33 crc kubenswrapper[4818]: I0930 17:10:33.539605 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb"]
Sep 30 17:10:33 crc kubenswrapper[4818]: W0930 17:10:33.543092 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53a9a60d_25e2_4794_adb3_83cd8c2df8b5.slice/crio-ff2ebceac69b59802cff50f50339fa617c16161d3c63269f2500723114bacf95 WatchSource:0}: Error finding container ff2ebceac69b59802cff50f50339fa617c16161d3c63269f2500723114bacf95: Status 404 returned error can't find the container with id ff2ebceac69b59802cff50f50339fa617c16161d3c63269f2500723114bacf95
Sep 30 17:10:33 crc kubenswrapper[4818]: I0930 17:10:33.780093 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb" event={"ID":"53a9a60d-25e2-4794-adb3-83cd8c2df8b5","Type":"ContainerStarted","Data":"ff2ebceac69b59802cff50f50339fa617c16161d3c63269f2500723114bacf95"}
Sep 30 17:10:36 crc kubenswrapper[4818]: I0930 17:10:36.801454 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb" event={"ID":"53a9a60d-25e2-4794-adb3-83cd8c2df8b5","Type":"ContainerStarted","Data":"8c060abea6610d0c74bdda1ebfbea006998b3a17ebad5f9a3fa75537ccd74635"}
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.518309 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-qkrhb" podStartSLOduration=8.172686504 podStartE2EDuration="10.51828644s" podCreationTimestamp="2025-09-30 17:10:32 +0000 UTC" firstStartedPulling="2025-09-30 17:10:33.544774292 +0000 UTC m=+680.299046108" lastFinishedPulling="2025-09-30 17:10:35.890374228 +0000 UTC m=+682.644646044" observedRunningTime="2025-09-30 17:10:36.822770412 +0000 UTC m=+683.577042268" watchObservedRunningTime="2025-09-30 17:10:42.51828644 +0000 UTC m=+689.272558266"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.522196 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.523521 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.525846 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-9dszg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.597368 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.598252 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.601958 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-fdv7d"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.602307 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.602871 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.609438 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.644888 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.688876 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzbd4\" (UniqueName: \"kubernetes.io/projected/b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97-kube-api-access-vzbd4\") pod \"nmstate-metrics-58fcddf996-zhjhv\" (UID: \"b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97\") " pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.704793 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.705662 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.708899 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.709035 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-lth6x"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.709112 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.723732 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.789797 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/edeb5db5-c3ef-4f9a-ba10-94b51f80d98c-tls-key-pair\") pod \"nmstate-webhook-6d689559c5-6ndvg\" (UID: \"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.789847 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqwdr\" (UniqueName: \"kubernetes.io/projected/edeb5db5-c3ef-4f9a-ba10-94b51f80d98c-kube-api-access-fqwdr\") pod \"nmstate-webhook-6d689559c5-6ndvg\" (UID: \"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.789998 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gslm\" (UniqueName: \"kubernetes.io/projected/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-kube-api-access-6gslm\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.790034 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-dbus-socket\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.790082 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzbd4\" (UniqueName: \"kubernetes.io/projected/b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97-kube-api-access-vzbd4\") pod \"nmstate-metrics-58fcddf996-zhjhv\" (UID: \"b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97\") " pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.790143 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-ovs-socket\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.790185 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-nmstate-lock\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.812007 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzbd4\" (UniqueName: \"kubernetes.io/projected/b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97-kube-api-access-vzbd4\") pod \"nmstate-metrics-58fcddf996-zhjhv\" (UID: \"b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97\") " pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.891213 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gslm\" (UniqueName: \"kubernetes.io/projected/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-kube-api-access-6gslm\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.891595 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-dbus-socket\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.891673 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-ovs-socket\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.891718 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-nmstate-lock\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.891844 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/61d0be16-a287-4fcc-ba56-4ba51fa86b60-plugin-serving-cert\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.891910 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/edeb5db5-c3ef-4f9a-ba10-94b51f80d98c-tls-key-pair\") pod \"nmstate-webhook-6d689559c5-6ndvg\" (UID: \"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.892019 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqwdr\" (UniqueName: \"kubernetes.io/projected/edeb5db5-c3ef-4f9a-ba10-94b51f80d98c-kube-api-access-fqwdr\") pod \"nmstate-webhook-6d689559c5-6ndvg\" (UID: \"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.892071 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/61d0be16-a287-4fcc-ba56-4ba51fa86b60-nginx-conf\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.892098 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdg5b\" (UniqueName: \"kubernetes.io/projected/61d0be16-a287-4fcc-ba56-4ba51fa86b60-kube-api-access-cdg5b\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.892112 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-dbus-socket\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.892214 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-ovs-socket\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.892652 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-nmstate-lock\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.902985 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/edeb5db5-c3ef-4f9a-ba10-94b51f80d98c-tls-key-pair\") pod \"nmstate-webhook-6d689559c5-6ndvg\" (UID: \"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.903274 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.911295 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-54465874f9-657tn"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.912231 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.917193 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54465874f9-657tn"]
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.918960 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gslm\" (UniqueName: \"kubernetes.io/projected/217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5-kube-api-access-6gslm\") pod \"nmstate-handler-fdv7d\" (UID: \"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5\") " pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.923078 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqwdr\" (UniqueName: \"kubernetes.io/projected/edeb5db5-c3ef-4f9a-ba10-94b51f80d98c-kube-api-access-fqwdr\") pod \"nmstate-webhook-6d689559c5-6ndvg\" (UID: \"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.930192 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.992999 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-serving-cert\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993054 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/61d0be16-a287-4fcc-ba56-4ba51fa86b60-nginx-conf\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993094 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdg5b\" (UniqueName: \"kubernetes.io/projected/61d0be16-a287-4fcc-ba56-4ba51fa86b60-kube-api-access-cdg5b\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993128 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-service-ca\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993150 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4twgr\" (UniqueName: \"kubernetes.io/projected/84d9da11-9e97-4595-a374-60d92eeb9737-kube-api-access-4twgr\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993194 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-oauth-config\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993217 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-console-config\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993245 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-oauth-serving-cert\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993272 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/61d0be16-a287-4fcc-ba56-4ba51fa86b60-plugin-serving-cert\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.993309 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-trusted-ca-bundle\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:42 crc kubenswrapper[4818]: I0930 17:10:42.994574 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/61d0be16-a287-4fcc-ba56-4ba51fa86b60-nginx-conf\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.000886 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/61d0be16-a287-4fcc-ba56-4ba51fa86b60-plugin-serving-cert\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.015109 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdg5b\" (UniqueName: \"kubernetes.io/projected/61d0be16-a287-4fcc-ba56-4ba51fa86b60-kube-api-access-cdg5b\") pod \"nmstate-console-plugin-864bb6dfb5-sdlfz\" (UID: \"61d0be16-a287-4fcc-ba56-4ba51fa86b60\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.024649 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.094158 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-trusted-ca-bundle\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.094218 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-serving-cert\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.094273 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-service-ca\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.094299 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4twgr\" (UniqueName: \"kubernetes.io/projected/84d9da11-9e97-4595-a374-60d92eeb9737-kube-api-access-4twgr\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.094345 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-oauth-config\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.094367 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-console-config\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.094394 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-oauth-serving-cert\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.095845 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-service-ca\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.096982 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-trusted-ca-bundle\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.097311 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-oauth-serving-cert\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.099330 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-serving-cert\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.100671 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-oauth-config\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.110746 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-console-config\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.116864 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4twgr\" (UniqueName: \"kubernetes.io/projected/84d9da11-9e97-4595-a374-60d92eeb9737-kube-api-access-4twgr\") pod \"console-54465874f9-657tn\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") " pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.207325 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz"]
Sep 30 17:10:43 crc kubenswrapper[4818]: W0930 17:10:43.211551 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61d0be16_a287_4fcc_ba56_4ba51fa86b60.slice/crio-acc5d125e1eb890a2d499473042dce177279149d06946c5881937701d0938d61 WatchSource:0}: Error finding container acc5d125e1eb890a2d499473042dce177279149d06946c5881937701d0938d61: Status 404 returned error can't find the container with id acc5d125e1eb890a2d499473042dce177279149d06946c5881937701d0938d61
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.221704 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.303717 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.324579 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv"]
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.390505 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"]
Sep 30 17:10:43 crc kubenswrapper[4818]: W0930 17:10:43.406331 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedeb5db5_c3ef_4f9a_ba10_94b51f80d98c.slice/crio-7e061883296b70ccf5c3bb30301426624878bf714d7afdef30cdf44233e7e86a WatchSource:0}: Error finding container 7e061883296b70ccf5c3bb30301426624878bf714d7afdef30cdf44233e7e86a: Status 404 returned error can't find the container with id 7e061883296b70ccf5c3bb30301426624878bf714d7afdef30cdf44233e7e86a
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.489077 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54465874f9-657tn"]
Sep 30 17:10:43 crc kubenswrapper[4818]: W0930 17:10:43.496856 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84d9da11_9e97_4595_a374_60d92eeb9737.slice/crio-3e5a0f58e8ffe653990a80b2e67bb9d782270d588a816e8305ff30ca71429c23 WatchSource:0}: Error finding container 3e5a0f58e8ffe653990a80b2e67bb9d782270d588a816e8305ff30ca71429c23: Status 404 returned error can't find the container with id 3e5a0f58e8ffe653990a80b2e67bb9d782270d588a816e8305ff30ca71429c23
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.848238 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg" event={"ID":"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c","Type":"ContainerStarted","Data":"7e061883296b70ccf5c3bb30301426624878bf714d7afdef30cdf44233e7e86a"}
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.849604 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-fdv7d" event={"ID":"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5","Type":"ContainerStarted","Data":"af38e7bfdc69d2e3143224757a79ff590c7801d8a38612935a1ad2c79d2c4e27"}
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.851629 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54465874f9-657tn" event={"ID":"84d9da11-9e97-4595-a374-60d92eeb9737","Type":"ContainerStarted","Data":"7f997279a3a2a2fd867b90485a3718728f05465f1dfa1e9cd97cc2895ea4f5f1"}
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.851672 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54465874f9-657tn" event={"ID":"84d9da11-9e97-4595-a374-60d92eeb9737","Type":"ContainerStarted","Data":"3e5a0f58e8ffe653990a80b2e67bb9d782270d588a816e8305ff30ca71429c23"}
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.852777 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz" event={"ID":"61d0be16-a287-4fcc-ba56-4ba51fa86b60","Type":"ContainerStarted","Data":"acc5d125e1eb890a2d499473042dce177279149d06946c5881937701d0938d61"}
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.854133 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv" event={"ID":"b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97","Type":"ContainerStarted","Data":"50b3f72052a40dd65f871e63a2d675435806b530e93e27673c7f35ec5835fb6f"}
Sep 30 17:10:43 crc kubenswrapper[4818]: I0930 17:10:43.877702 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-54465874f9-657tn" podStartSLOduration=1.877652292 podStartE2EDuration="1.877652292s" podCreationTimestamp="2025-09-30 17:10:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:10:43.876823929 +0000 UTC m=+690.631095815" watchObservedRunningTime="2025-09-30 17:10:43.877652292 +0000 UTC m=+690.631924128"
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.876998 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv" event={"ID":"b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97","Type":"ContainerStarted","Data":"c039fecdd4962bf0f74b678594e76a14fa189e21af20129343bf7cbe5d2f61b8"}
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.878536 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg" event={"ID":"edeb5db5-c3ef-4f9a-ba10-94b51f80d98c","Type":"ContainerStarted","Data":"18b42af981fea6a228218cacd1d18d957006c36a3ddd9c7d40ae471c497f8568"}
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.878814 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.881322 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-fdv7d" event={"ID":"217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5","Type":"ContainerStarted","Data":"b49f5f6dcb17d9cbf6ca1d208e187ac86c007c56d3c7136fd336eb713a51519f"}
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.881602 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.885803 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz" event={"ID":"61d0be16-a287-4fcc-ba56-4ba51fa86b60","Type":"ContainerStarted","Data":"c6a6006e3a8e2b48fc56284a42e3cc48089b38fe1e6fd6c946d0c8595f6683e5"}
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.910305 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg" podStartSLOduration=2.072102939 podStartE2EDuration="4.910280093s" podCreationTimestamp="2025-09-30 17:10:42 +0000 UTC" firstStartedPulling="2025-09-30 17:10:43.409111546 +0000 UTC m=+690.163383362" lastFinishedPulling="2025-09-30 17:10:46.24728867 +0000 UTC m=+693.001560516" observedRunningTime="2025-09-30 17:10:46.896264502 +0000 UTC m=+693.650536318" watchObservedRunningTime="2025-09-30 17:10:46.910280093 +0000 UTC m=+693.664551909"
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.932781 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-sdlfz" podStartSLOduration=1.904796519 podStartE2EDuration="4.932758023s" podCreationTimestamp="2025-09-30 17:10:42 +0000 UTC" firstStartedPulling="2025-09-30 17:10:43.213743954 +0000 UTC m=+689.968015770" lastFinishedPulling="2025-09-30 17:10:46.241705418 +0000 UTC m=+692.995977274" observedRunningTime="2025-09-30 17:10:46.920022927 +0000 UTC m=+693.674294773" watchObservedRunningTime="2025-09-30 17:10:46.932758023 +0000 UTC m=+693.687029839"
Sep 30 17:10:46 crc kubenswrapper[4818]: I0930 17:10:46.949545 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-fdv7d" podStartSLOduration=1.640419533 podStartE2EDuration="4.949528008s" podCreationTimestamp="2025-09-30 17:10:42 +0000 UTC" firstStartedPulling="2025-09-30 17:10:42.954413046 +0000 UTC m=+689.708684862" lastFinishedPulling="2025-09-30 17:10:46.263521511 +0000 UTC m=+693.017793337" observedRunningTime="2025-09-30 17:10:46.937715257 +0000 UTC m=+693.691987123" watchObservedRunningTime="2025-09-30 17:10:46.949528008 +0000 UTC m=+693.703799824"
Sep 30 17:10:48 crc kubenswrapper[4818]: I0930 17:10:48.902980 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv" event={"ID":"b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97","Type":"ContainerStarted","Data":"7b8c7a9d1765f8de35c4e49d58c0257a7327bc38d30287db655321fcbcddcc83"}
Sep 30 17:10:48 crc kubenswrapper[4818]: I0930 17:10:48.930154 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-58fcddf996-zhjhv" podStartSLOduration=1.667488149 podStartE2EDuration="6.930134678s" podCreationTimestamp="2025-09-30 17:10:42 +0000 UTC" firstStartedPulling="2025-09-30 17:10:43.341725858 +0000 UTC m=+690.095997684" lastFinishedPulling="2025-09-30 17:10:48.604372377 +0000 UTC m=+695.358644213" observedRunningTime="2025-09-30 17:10:48.92467634 +0000 UTC m=+695.678948196" watchObservedRunningTime="2025-09-30 17:10:48.930134678 +0000 UTC m=+695.684406504"
Sep 30 17:10:52 crc kubenswrapper[4818]: I0930 17:10:52.597361 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:10:52 crc kubenswrapper[4818]: I0930 17:10:52.597684 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:10:52 crc kubenswrapper[4818]: I0930 17:10:52.971311 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-fdv7d"
Sep 30 17:10:53 crc kubenswrapper[4818]: I0930 17:10:53.304715 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:53 crc kubenswrapper[4818]: I0930 17:10:53.305208 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:53 crc kubenswrapper[4818]: I0930 17:10:53.312582 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:53 crc kubenswrapper[4818]: I0930 17:10:53.945476 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:10:54 crc kubenswrapper[4818]: I0930 17:10:54.010148 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-wr9kd"]
Sep 30 17:11:03 crc kubenswrapper[4818]: I0930 17:11:03.243190 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6d689559c5-6ndvg"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.031353 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"]
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.033267 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.035268 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.046374 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"]
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.164981 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-bundle\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.165117 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-util\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.165145 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bjd8\" (UniqueName: \"kubernetes.io/projected/e15bd4a3-2980-4e48-b222-988af1b45bb4-kube-api-access-9bjd8\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.266806 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-util\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.266848 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bjd8\" (UniqueName: \"kubernetes.io/projected/e15bd4a3-2980-4e48-b222-988af1b45bb4-kube-api-access-9bjd8\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.266879 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-bundle\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.267377 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-bundle\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.268116 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-util\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.293161 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bjd8\" (UniqueName: \"kubernetes.io/projected/e15bd4a3-2980-4e48-b222-988af1b45bb4-kube-api-access-9bjd8\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.349321 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"
Sep 30 17:11:18 crc kubenswrapper[4818]: I0930 17:11:18.632117 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd"]
Sep 30 17:11:18 crc kubenswrapper[4818]: W0930 17:11:18.636046 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode15bd4a3_2980_4e48_b222_988af1b45bb4.slice/crio-752b5d194105f53b0bde29db3365264c9c21fa58a2129ba609e9bb4217cf2cc2 WatchSource:0}: Error finding container 752b5d194105f53b0bde29db3365264c9c21fa58a2129ba609e9bb4217cf2cc2: Status 404 returned error can't find the container with id 752b5d194105f53b0bde29db3365264c9c21fa58a2129ba609e9bb4217cf2cc2
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.084072 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-wr9kd" podUID="4709760d-9993-42d3-97c3-bd5470b9c8ab" containerName="console" containerID="cri-o://ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0" gracePeriod=15
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.128382 4818 generic.go:334] "Generic (PLEG): container finished" podID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerID="e802c8d4fa7b1858c33e26f3b4ccb50c790e39acc7c2e80007ac63d7427e249f" exitCode=0
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.128463 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd" event={"ID":"e15bd4a3-2980-4e48-b222-988af1b45bb4","Type":"ContainerDied","Data":"e802c8d4fa7b1858c33e26f3b4ccb50c790e39acc7c2e80007ac63d7427e249f"}
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.128542 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd" event={"ID":"e15bd4a3-2980-4e48-b222-988af1b45bb4","Type":"ContainerStarted","Data":"752b5d194105f53b0bde29db3365264c9c21fa58a2129ba609e9bb4217cf2cc2"}
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.446196 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-wr9kd_4709760d-9993-42d3-97c3-bd5470b9c8ab/console/0.log"
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.446387 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-wr9kd"
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.483609 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-service-ca\") pod \"4709760d-9993-42d3-97c3-bd5470b9c8ab\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") "
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.483670 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6qmz\" (UniqueName: \"kubernetes.io/projected/4709760d-9993-42d3-97c3-bd5470b9c8ab-kube-api-access-m6qmz\") pod \"4709760d-9993-42d3-97c3-bd5470b9c8ab\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") "
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.483688 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-serving-cert\") pod \"4709760d-9993-42d3-97c3-bd5470b9c8ab\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") "
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.483713 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-config\") pod \"4709760d-9993-42d3-97c3-bd5470b9c8ab\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") "
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.483769 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-oauth-config\") pod \"4709760d-9993-42d3-97c3-bd5470b9c8ab\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") "
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.483811 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-trusted-ca-bundle\") pod \"4709760d-9993-42d3-97c3-bd5470b9c8ab\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") "
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.483839 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-oauth-serving-cert\") pod \"4709760d-9993-42d3-97c3-bd5470b9c8ab\" (UID: \"4709760d-9993-42d3-97c3-bd5470b9c8ab\") "
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.485209 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "4709760d-9993-42d3-97c3-bd5470b9c8ab" (UID: "4709760d-9993-42d3-97c3-bd5470b9c8ab"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.485577 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-service-ca" (OuterVolumeSpecName: "service-ca") pod "4709760d-9993-42d3-97c3-bd5470b9c8ab" (UID: "4709760d-9993-42d3-97c3-bd5470b9c8ab"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.486809 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "4709760d-9993-42d3-97c3-bd5470b9c8ab" (UID: "4709760d-9993-42d3-97c3-bd5470b9c8ab"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.487092 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-config" (OuterVolumeSpecName: "console-config") pod "4709760d-9993-42d3-97c3-bd5470b9c8ab" (UID: "4709760d-9993-42d3-97c3-bd5470b9c8ab"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.494788 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4709760d-9993-42d3-97c3-bd5470b9c8ab-kube-api-access-m6qmz" (OuterVolumeSpecName: "kube-api-access-m6qmz") pod "4709760d-9993-42d3-97c3-bd5470b9c8ab" (UID: "4709760d-9993-42d3-97c3-bd5470b9c8ab"). InnerVolumeSpecName "kube-api-access-m6qmz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.495225 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "4709760d-9993-42d3-97c3-bd5470b9c8ab" (UID: "4709760d-9993-42d3-97c3-bd5470b9c8ab"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.495649 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "4709760d-9993-42d3-97c3-bd5470b9c8ab" (UID: "4709760d-9993-42d3-97c3-bd5470b9c8ab"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.586559 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.586627 4818 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.586649 4818 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-service-ca\") on node \"crc\" DevicePath \"\""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.586666 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6qmz\" (UniqueName: \"kubernetes.io/projected/4709760d-9993-42d3-97c3-bd5470b9c8ab-kube-api-access-m6qmz\") on node \"crc\" DevicePath \"\""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.586686 4818 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-serving-cert\") on node \"crc\" DevicePath \"\""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.586702 4818 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-config\") on node \"crc\" DevicePath \"\""
Sep 30 17:11:19 crc kubenswrapper[4818]: I0930 17:11:19.586718 4818 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4709760d-9993-42d3-97c3-bd5470b9c8ab-console-oauth-config\") on node \"crc\" DevicePath \"\""
Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.137026 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-wr9kd_4709760d-9993-42d3-97c3-bd5470b9c8ab/console/0.log"
Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.137110 4818 generic.go:334] "Generic (PLEG): container finished" podID="4709760d-9993-42d3-97c3-bd5470b9c8ab" containerID="ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0" exitCode=2
Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.137165 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-wr9kd" event={"ID":"4709760d-9993-42d3-97c3-bd5470b9c8ab","Type":"ContainerDied","Data":"ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0"}
Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.137216 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-wr9kd" event={"ID":"4709760d-9993-42d3-97c3-bd5470b9c8ab","Type":"ContainerDied","Data":"98e66d2e996d2d1fe2de5ba492a09639c6dfa5ba0425ab1d47f32815ad7abf70"}
Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.137244 4818 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-console/console-f9d7485db-wr9kd" Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.137253 4818 scope.go:117] "RemoveContainer" containerID="ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0" Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.161261 4818 scope.go:117] "RemoveContainer" containerID="ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0" Sep 30 17:11:20 crc kubenswrapper[4818]: E0930 17:11:20.161688 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0\": container with ID starting with ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0 not found: ID does not exist" containerID="ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0" Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.161725 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0"} err="failed to get container status \"ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0\": rpc error: code = NotFound desc = could not find container \"ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0\": container with ID starting with ab902c35b7553bc93a232aa3d2209531d4ed4a1f1235bf7032864d69def1f6e0 not found: ID does not exist" Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.163428 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-wr9kd"] Sep 30 17:11:20 crc kubenswrapper[4818]: I0930 17:11:20.168215 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-wr9kd"] Sep 30 17:11:21 crc kubenswrapper[4818]: I0930 17:11:21.150281 4818 generic.go:334] "Generic (PLEG): container finished" podID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerID="bfbf22c32cfb7b3d44d323a236308bc8828c8dd8d22c47a177d8f924d8e38796" exitCode=0 Sep 30 17:11:21 crc kubenswrapper[4818]: I0930 17:11:21.150339 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd" event={"ID":"e15bd4a3-2980-4e48-b222-988af1b45bb4","Type":"ContainerDied","Data":"bfbf22c32cfb7b3d44d323a236308bc8828c8dd8d22c47a177d8f924d8e38796"} Sep 30 17:11:22 crc kubenswrapper[4818]: I0930 17:11:22.028072 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4709760d-9993-42d3-97c3-bd5470b9c8ab" path="/var/lib/kubelet/pods/4709760d-9993-42d3-97c3-bd5470b9c8ab/volumes" Sep 30 17:11:22 crc kubenswrapper[4818]: I0930 17:11:22.158278 4818 generic.go:334] "Generic (PLEG): container finished" podID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerID="51051faff270512e58c1348401ac76f01709df479b65f0e23505c0fa708d2044" exitCode=0 Sep 30 17:11:22 crc kubenswrapper[4818]: I0930 17:11:22.158344 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd" event={"ID":"e15bd4a3-2980-4e48-b222-988af1b45bb4","Type":"ContainerDied","Data":"51051faff270512e58c1348401ac76f01709df479b65f0e23505c0fa708d2044"} Sep 30 17:11:22 crc kubenswrapper[4818]: I0930 17:11:22.596039 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:11:22 crc kubenswrapper[4818]: I0930 17:11:22.596377 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.496097 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd" Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.539608 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-util\") pod \"e15bd4a3-2980-4e48-b222-988af1b45bb4\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.539836 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-bundle\") pod \"e15bd4a3-2980-4e48-b222-988af1b45bb4\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.539866 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bjd8\" (UniqueName: \"kubernetes.io/projected/e15bd4a3-2980-4e48-b222-988af1b45bb4-kube-api-access-9bjd8\") pod \"e15bd4a3-2980-4e48-b222-988af1b45bb4\" (UID: \"e15bd4a3-2980-4e48-b222-988af1b45bb4\") " Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.541288 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-bundle" (OuterVolumeSpecName: "bundle") pod "e15bd4a3-2980-4e48-b222-988af1b45bb4" (UID: "e15bd4a3-2980-4e48-b222-988af1b45bb4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.548204 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e15bd4a3-2980-4e48-b222-988af1b45bb4-kube-api-access-9bjd8" (OuterVolumeSpecName: "kube-api-access-9bjd8") pod "e15bd4a3-2980-4e48-b222-988af1b45bb4" (UID: "e15bd4a3-2980-4e48-b222-988af1b45bb4"). InnerVolumeSpecName "kube-api-access-9bjd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.559577 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-util" (OuterVolumeSpecName: "util") pod "e15bd4a3-2980-4e48-b222-988af1b45bb4" (UID: "e15bd4a3-2980-4e48-b222-988af1b45bb4"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.641893 4818 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.641917 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bjd8\" (UniqueName: \"kubernetes.io/projected/e15bd4a3-2980-4e48-b222-988af1b45bb4-kube-api-access-9bjd8\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:23 crc kubenswrapper[4818]: I0930 17:11:23.641940 4818 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e15bd4a3-2980-4e48-b222-988af1b45bb4-util\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:24 crc kubenswrapper[4818]: I0930 17:11:24.172031 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd" event={"ID":"e15bd4a3-2980-4e48-b222-988af1b45bb4","Type":"ContainerDied","Data":"752b5d194105f53b0bde29db3365264c9c21fa58a2129ba609e9bb4217cf2cc2"} Sep 30 17:11:24 crc kubenswrapper[4818]: I0930 17:11:24.172543 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="752b5d194105f53b0bde29db3365264c9c21fa58a2129ba609e9bb4217cf2cc2" Sep 30 17:11:24 crc kubenswrapper[4818]: I0930 17:11:24.172138 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.770755 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m"] Sep 30 17:11:32 crc kubenswrapper[4818]: E0930 17:11:32.771521 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerName="util" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.771536 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerName="util" Sep 30 17:11:32 crc kubenswrapper[4818]: E0930 17:11:32.771548 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerName="extract" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.771558 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerName="extract" Sep 30 17:11:32 crc kubenswrapper[4818]: E0930 17:11:32.771572 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4709760d-9993-42d3-97c3-bd5470b9c8ab" containerName="console" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.771579 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4709760d-9993-42d3-97c3-bd5470b9c8ab" containerName="console" Sep 30 17:11:32 crc kubenswrapper[4818]: E0930 17:11:32.771609 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerName="pull" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.771617 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerName="pull" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.771729 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="e15bd4a3-2980-4e48-b222-988af1b45bb4" containerName="extract" Sep 
30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.771745 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4709760d-9993-42d3-97c3-bd5470b9c8ab" containerName="console" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.772251 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.776277 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.776354 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.776700 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.778622 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-4z8nm" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.778680 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.795480 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m"] Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.861819 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49z8q\" (UniqueName: \"kubernetes.io/projected/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-kube-api-access-49z8q\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.861907 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-apiservice-cert\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.861971 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-webhook-cert\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.963046 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-apiservice-cert\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.963136 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-webhook-cert\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.963373 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49z8q\" (UniqueName: \"kubernetes.io/projected/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-kube-api-access-49z8q\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.970543 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-apiservice-cert\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.977391 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-webhook-cert\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:32 crc kubenswrapper[4818]: I0930 17:11:32.980641 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49z8q\" (UniqueName: \"kubernetes.io/projected/a43b85d0-34d6-49ad-9fc0-7580b2f2ef36-kube-api-access-49z8q\") pod \"metallb-operator-controller-manager-6f59b986b5-d4h6m\" (UID: \"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36\") " pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.079594 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt"] Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.080487 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.084318 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.085913 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.086083 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-fcpvv" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.089220 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.136587 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt"] Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.164708 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfmlq\" (UniqueName: \"kubernetes.io/projected/f1beebb1-d722-453d-ab32-f986bfd746df-kube-api-access-vfmlq\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.164752 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f1beebb1-d722-453d-ab32-f986bfd746df-apiservice-cert\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.164780 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f1beebb1-d722-453d-ab32-f986bfd746df-webhook-cert\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.265745 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f1beebb1-d722-453d-ab32-f986bfd746df-apiservice-cert\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.266026 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfmlq\" (UniqueName: \"kubernetes.io/projected/f1beebb1-d722-453d-ab32-f986bfd746df-kube-api-access-vfmlq\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.266053 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f1beebb1-d722-453d-ab32-f986bfd746df-webhook-cert\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.270070 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f1beebb1-d722-453d-ab32-f986bfd746df-apiservice-cert\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.270805 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/f1beebb1-d722-453d-ab32-f986bfd746df-webhook-cert\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.283972 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfmlq\" (UniqueName: \"kubernetes.io/projected/f1beebb1-d722-453d-ab32-f986bfd746df-kube-api-access-vfmlq\") pod \"metallb-operator-webhook-server-795889b56f-gt7lt\" (UID: \"f1beebb1-d722-453d-ab32-f986bfd746df\") " pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.396610 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.562562 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m"] Sep 30 17:11:33 crc kubenswrapper[4818]: W0930 17:11:33.569220 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda43b85d0_34d6_49ad_9fc0_7580b2f2ef36.slice/crio-1bd4b2a15a1af37c13e61ebf23c41e62d9e979aebf883821d07f96e74f75b41f WatchSource:0}: Error finding container 1bd4b2a15a1af37c13e61ebf23c41e62d9e979aebf883821d07f96e74f75b41f: Status 404 returned error can't find the container with id 1bd4b2a15a1af37c13e61ebf23c41e62d9e979aebf883821d07f96e74f75b41f Sep 30 17:11:33 crc kubenswrapper[4818]: I0930 17:11:33.813310 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt"] Sep 30 17:11:33 crc kubenswrapper[4818]: W0930 17:11:33.819895 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1beebb1_d722_453d_ab32_f986bfd746df.slice/crio-61d16756c18a4152458cefa856aa51ece82d243830e037974a6f50e8ce93564b WatchSource:0}: Error finding container 61d16756c18a4152458cefa856aa51ece82d243830e037974a6f50e8ce93564b: Status 404 returned error can't find the container with id 61d16756c18a4152458cefa856aa51ece82d243830e037974a6f50e8ce93564b Sep 30 17:11:34 crc kubenswrapper[4818]: I0930 17:11:34.299677 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" event={"ID":"f1beebb1-d722-453d-ab32-f986bfd746df","Type":"ContainerStarted","Data":"61d16756c18a4152458cefa856aa51ece82d243830e037974a6f50e8ce93564b"} Sep 30 17:11:34 crc kubenswrapper[4818]: I0930 17:11:34.300899 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" event={"ID":"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36","Type":"ContainerStarted","Data":"1bd4b2a15a1af37c13e61ebf23c41e62d9e979aebf883821d07f96e74f75b41f"} Sep 30 17:11:38 crc kubenswrapper[4818]: I0930 17:11:38.334630 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" event={"ID":"a43b85d0-34d6-49ad-9fc0-7580b2f2ef36","Type":"ContainerStarted","Data":"1adb89fd8a34ac6a7f1ba91a8647990f817771642044113e78ff4d70c477e82c"} Sep 30 17:11:38 crc kubenswrapper[4818]: I0930 17:11:38.334981 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" Sep 30 17:11:38 crc kubenswrapper[4818]: I0930 17:11:38.358089 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m" podStartSLOduration=2.686168774 podStartE2EDuration="6.358066901s" podCreationTimestamp="2025-09-30 17:11:32 +0000 UTC" firstStartedPulling="2025-09-30 17:11:33.571244347 +0000 UTC m=+740.325516163" lastFinishedPulling="2025-09-30 17:11:37.243142474 +0000 UTC m=+743.997414290" observedRunningTime="2025-09-30 17:11:38.353717853 +0000 UTC m=+745.107989669" watchObservedRunningTime="2025-09-30 17:11:38.358066901 +0000 UTC m=+745.112338727" Sep 30 17:11:40 crc kubenswrapper[4818]: I0930 17:11:40.345956 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" event={"ID":"f1beebb1-d722-453d-ab32-f986bfd746df","Type":"ContainerStarted","Data":"6175c64ac0d895b660014d129ec80bae2ba17fb785029cfb5eecd3966daa609d"} Sep 30 17:11:40 crc kubenswrapper[4818]: I0930 17:11:40.346268 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" Sep 30 17:11:40 crc kubenswrapper[4818]: I0930 17:11:40.372311 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt" podStartSLOduration=1.59886866 podStartE2EDuration="7.372293285s" podCreationTimestamp="2025-09-30 17:11:33 +0000 UTC" firstStartedPulling="2025-09-30 17:11:33.823177308 +0000 UTC m=+740.577449134" lastFinishedPulling="2025-09-30 17:11:39.596601943 +0000 UTC m=+746.350873759" observedRunningTime="2025-09-30 17:11:40.370292231 +0000 UTC m=+747.124564067" watchObservedRunningTime="2025-09-30 17:11:40.372293285 +0000 UTC m=+747.126565121" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.299115 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xkh6t"] Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.299885 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" podUID="f7a2ab63-3622-49c3-abef-fc6ff98758e4" containerName="controller-manager" containerID="cri-o://6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43" gracePeriod=30 Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.321454 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"] Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.322030 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" podUID="a4a87b84-cb7a-4406-ac3a-473e984376a1" containerName="route-controller-manager" containerID="cri-o://bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3" gracePeriod=30 Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.718178 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.723713 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851391 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-proxy-ca-bundles\") pod \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851438 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-config\") pod \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851467 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4a87b84-cb7a-4406-ac3a-473e984376a1-serving-cert\") pod \"a4a87b84-cb7a-4406-ac3a-473e984376a1\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851503 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7a2ab63-3622-49c3-abef-fc6ff98758e4-serving-cert\") pod \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851538 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-config\") pod \"a4a87b84-cb7a-4406-ac3a-473e984376a1\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851587 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds77c\" (UniqueName: \"kubernetes.io/projected/f7a2ab63-3622-49c3-abef-fc6ff98758e4-kube-api-access-ds77c\") pod \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851612 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-client-ca\") pod \"a4a87b84-cb7a-4406-ac3a-473e984376a1\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851641 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-client-ca\") pod \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\" (UID: \"f7a2ab63-3622-49c3-abef-fc6ff98758e4\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.851672 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7smv5\" (UniqueName: \"kubernetes.io/projected/a4a87b84-cb7a-4406-ac3a-473e984376a1-kube-api-access-7smv5\") pod \"a4a87b84-cb7a-4406-ac3a-473e984376a1\" (UID: \"a4a87b84-cb7a-4406-ac3a-473e984376a1\") " Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.852158 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-client-ca" (OuterVolumeSpecName: "client-ca") pod "a4a87b84-cb7a-4406-ac3a-473e984376a1" 
(UID: "a4a87b84-cb7a-4406-ac3a-473e984376a1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.852193 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-config" (OuterVolumeSpecName: "config") pod "a4a87b84-cb7a-4406-ac3a-473e984376a1" (UID: "a4a87b84-cb7a-4406-ac3a-473e984376a1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.852366 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-client-ca" (OuterVolumeSpecName: "client-ca") pod "f7a2ab63-3622-49c3-abef-fc6ff98758e4" (UID: "f7a2ab63-3622-49c3-abef-fc6ff98758e4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.852558 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f7a2ab63-3622-49c3-abef-fc6ff98758e4" (UID: "f7a2ab63-3622-49c3-abef-fc6ff98758e4"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.854290 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-config" (OuterVolumeSpecName: "config") pod "f7a2ab63-3622-49c3-abef-fc6ff98758e4" (UID: "f7a2ab63-3622-49c3-abef-fc6ff98758e4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.856361 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4a87b84-cb7a-4406-ac3a-473e984376a1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a4a87b84-cb7a-4406-ac3a-473e984376a1" (UID: "a4a87b84-cb7a-4406-ac3a-473e984376a1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.860128 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7a2ab63-3622-49c3-abef-fc6ff98758e4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f7a2ab63-3622-49c3-abef-fc6ff98758e4" (UID: "f7a2ab63-3622-49c3-abef-fc6ff98758e4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.860312 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7a2ab63-3622-49c3-abef-fc6ff98758e4-kube-api-access-ds77c" (OuterVolumeSpecName: "kube-api-access-ds77c") pod "f7a2ab63-3622-49c3-abef-fc6ff98758e4" (UID: "f7a2ab63-3622-49c3-abef-fc6ff98758e4"). InnerVolumeSpecName "kube-api-access-ds77c". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.860347 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4a87b84-cb7a-4406-ac3a-473e984376a1-kube-api-access-7smv5" (OuterVolumeSpecName: "kube-api-access-7smv5") pod "a4a87b84-cb7a-4406-ac3a-473e984376a1" (UID: "a4a87b84-cb7a-4406-ac3a-473e984376a1"). 
InnerVolumeSpecName "kube-api-access-7smv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953178 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds77c\" (UniqueName: \"kubernetes.io/projected/f7a2ab63-3622-49c3-abef-fc6ff98758e4-kube-api-access-ds77c\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953223 4818 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-client-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953234 4818 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-client-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953242 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7smv5\" (UniqueName: \"kubernetes.io/projected/a4a87b84-cb7a-4406-ac3a-473e984376a1-kube-api-access-7smv5\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953250 4818 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953259 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7a2ab63-3622-49c3-abef-fc6ff98758e4-config\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953268 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4a87b84-cb7a-4406-ac3a-473e984376a1-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953276 4818 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7a2ab63-3622-49c3-abef-fc6ff98758e4-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:47 crc kubenswrapper[4818]: I0930 17:11:47.953284 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4a87b84-cb7a-4406-ac3a-473e984376a1-config\") on node \"crc\" DevicePath \"\"" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.398857 4818 generic.go:334] "Generic (PLEG): container finished" podID="f7a2ab63-3622-49c3-abef-fc6ff98758e4" containerID="6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43" exitCode=0 Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.398912 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.398954 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" event={"ID":"f7a2ab63-3622-49c3-abef-fc6ff98758e4","Type":"ContainerDied","Data":"6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43"} Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.398988 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xkh6t" event={"ID":"f7a2ab63-3622-49c3-abef-fc6ff98758e4","Type":"ContainerDied","Data":"32ac8af477cdc5a16e2ce78429e59f84497082b1375368c4e6a678d6055875d9"} Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.399004 4818 scope.go:117] "RemoveContainer" containerID="6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.400557 4818 generic.go:334] "Generic (PLEG): container finished" podID="a4a87b84-cb7a-4406-ac3a-473e984376a1" containerID="bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3" exitCode=0 Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.400577 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" event={"ID":"a4a87b84-cb7a-4406-ac3a-473e984376a1","Type":"ContainerDied","Data":"bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3"} Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.400595 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" event={"ID":"a4a87b84-cb7a-4406-ac3a-473e984376a1","Type":"ContainerDied","Data":"71161e90649e21a2c1e2943b936ddfcecd738d45fb34de0249dcd23a1e01e253"} Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.401057 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.430514 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xkh6t"] Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.434769 4818 scope.go:117] "RemoveContainer" containerID="6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43" Sep 30 17:11:48 crc kubenswrapper[4818]: E0930 17:11:48.435208 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43\": container with ID starting with 6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43 not found: ID does not exist" containerID="6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.435247 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43"} err="failed to get container status \"6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43\": rpc error: code = NotFound desc = could not find container \"6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43\": container with ID starting with 6523711e78c424baaf7781e540fec1362619462d2879e1c2fa6d6cf7a2efae43 not found: ID does not exist" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.435274 4818 scope.go:117] "RemoveContainer" containerID="bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.439868 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xkh6t"] Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.447016 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"] Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.449641 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xkkwg"] Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.450900 4818 scope.go:117] "RemoveContainer" containerID="bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3" Sep 30 17:11:48 crc kubenswrapper[4818]: E0930 17:11:48.451437 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3\": container with ID starting with bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3 not found: ID does not exist" containerID="bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.451472 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3"} err="failed to get container status \"bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3\": rpc error: code = NotFound desc = could not find container \"bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3\": container with ID starting with bdd7e725ad71d4d3250c4733b370eed3d718ab861f030cbe0d04d4c67fdcc2b3 not found: ID does not exist" Sep 30 
17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.533090 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-b54894654-hmsxf"] Sep 30 17:11:48 crc kubenswrapper[4818]: E0930 17:11:48.533354 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7a2ab63-3622-49c3-abef-fc6ff98758e4" containerName="controller-manager" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.533372 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7a2ab63-3622-49c3-abef-fc6ff98758e4" containerName="controller-manager" Sep 30 17:11:48 crc kubenswrapper[4818]: E0930 17:11:48.533390 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4a87b84-cb7a-4406-ac3a-473e984376a1" containerName="route-controller-manager" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.533397 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4a87b84-cb7a-4406-ac3a-473e984376a1" containerName="route-controller-manager" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.533490 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4a87b84-cb7a-4406-ac3a-473e984376a1" containerName="route-controller-manager" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.533502 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7a2ab63-3622-49c3-abef-fc6ff98758e4" containerName="controller-manager" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.533896 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b54894654-hmsxf" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.535529 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.536395 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.536679 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.536714 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.536732 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.537262 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"] Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.538085 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.539021 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.553900 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.554348 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.554534 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.554983 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.554349 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.555239 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.564255 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"]
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.568121 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.576774 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtmk6\" (UniqueName: \"kubernetes.io/projected/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-kube-api-access-dtmk6\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.576821 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-client-ca\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.576850 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-config\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.576901 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13209b-80db-4d00-b717-b3ef56b985ff-client-ca\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.576948 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sms8z\" (UniqueName: \"kubernetes.io/projected/7c13209b-80db-4d00-b717-b3ef56b985ff-kube-api-access-sms8z\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.576974 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-proxy-ca-bundles\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.576999 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13209b-80db-4d00-b717-b3ef56b985ff-config\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.577020 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-serving-cert\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.577039 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13209b-80db-4d00-b717-b3ef56b985ff-serving-cert\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.579978 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b54894654-hmsxf"]
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678097 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-config\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678180 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13209b-80db-4d00-b717-b3ef56b985ff-client-ca\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678214 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sms8z\" (UniqueName: \"kubernetes.io/projected/7c13209b-80db-4d00-b717-b3ef56b985ff-kube-api-access-sms8z\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678243 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-proxy-ca-bundles\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678267 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13209b-80db-4d00-b717-b3ef56b985ff-config\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678289 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-serving-cert\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678313 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13209b-80db-4d00-b717-b3ef56b985ff-serving-cert\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678356 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtmk6\" (UniqueName: \"kubernetes.io/projected/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-kube-api-access-dtmk6\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.678382 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-client-ca\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.679169 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13209b-80db-4d00-b717-b3ef56b985ff-client-ca\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.679573 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-client-ca\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.679626 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-config\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.680018 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13209b-80db-4d00-b717-b3ef56b985ff-config\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.682755 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13209b-80db-4d00-b717-b3ef56b985ff-serving-cert\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.684452 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-serving-cert\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.701095 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtmk6\" (UniqueName: \"kubernetes.io/projected/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-kube-api-access-dtmk6\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.703477 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sms8z\" (UniqueName: \"kubernetes.io/projected/7c13209b-80db-4d00-b717-b3ef56b985ff-kube-api-access-sms8z\") pod \"route-controller-manager-84bf8fc754-dcmng\" (UID: \"7c13209b-80db-4d00-b717-b3ef56b985ff\") " pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.714648 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6-proxy-ca-bundles\") pod \"controller-manager-b54894654-hmsxf\" (UID: \"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6\") " pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.850721 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:48 crc kubenswrapper[4818]: I0930 17:11:48.873364 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:49 crc kubenswrapper[4818]: I0930 17:11:49.119033 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b54894654-hmsxf"]
Sep 30 17:11:49 crc kubenswrapper[4818]: I0930 17:11:49.142362 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"]
Sep 30 17:11:49 crc kubenswrapper[4818]: W0930 17:11:49.149277 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c13209b_80db_4d00_b717_b3ef56b985ff.slice/crio-a397d9e0032715958754be9fe8711e0b10315a03897eef3b0e3da1f24f7692ea WatchSource:0}: Error finding container a397d9e0032715958754be9fe8711e0b10315a03897eef3b0e3da1f24f7692ea: Status 404 returned error can't find the container with id a397d9e0032715958754be9fe8711e0b10315a03897eef3b0e3da1f24f7692ea
Sep 30 17:11:49 crc kubenswrapper[4818]: I0930 17:11:49.409550 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng" event={"ID":"7c13209b-80db-4d00-b717-b3ef56b985ff","Type":"ContainerStarted","Data":"a397d9e0032715958754be9fe8711e0b10315a03897eef3b0e3da1f24f7692ea"}
Sep 30 17:11:49 crc kubenswrapper[4818]: I0930 17:11:49.410712 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b54894654-hmsxf" event={"ID":"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6","Type":"ContainerStarted","Data":"f2a10857a331b37517abaaa88265b4a1169dc60c0dd4915c09a74ad6df55acac"}
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.026720 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4a87b84-cb7a-4406-ac3a-473e984376a1" path="/var/lib/kubelet/pods/a4a87b84-cb7a-4406-ac3a-473e984376a1/volumes"
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.027897 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7a2ab63-3622-49c3-abef-fc6ff98758e4" path="/var/lib/kubelet/pods/f7a2ab63-3622-49c3-abef-fc6ff98758e4/volumes"
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.417619 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng" event={"ID":"7c13209b-80db-4d00-b717-b3ef56b985ff","Type":"ContainerStarted","Data":"2b8fe0af852ab66cfa970beefd5e1a7920b3d899a696990b8a59535a5c574cdf"}
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.417959 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.423633 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng"
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.424528 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b54894654-hmsxf" event={"ID":"1cf2d3a8-bfef-4baa-9fcc-65c1cdab6ab6","Type":"ContainerStarted","Data":"75e2e2642428af2c10cb68a3366f356dd16990ac3df2728fcca15db0bb1b4d07"}
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.424748 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.429410 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-b54894654-hmsxf"
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.439546 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-84bf8fc754-dcmng" podStartSLOduration=3.439523367 podStartE2EDuration="3.439523367s" podCreationTimestamp="2025-09-30 17:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:11:50.438036646 +0000 UTC m=+757.192308512" watchObservedRunningTime="2025-09-30 17:11:50.439523367 +0000 UTC m=+757.193795183"
Sep 30 17:11:50 crc kubenswrapper[4818]: I0930 17:11:50.460918 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-b54894654-hmsxf" podStartSLOduration=3.460888815 podStartE2EDuration="3.460888815s" podCreationTimestamp="2025-09-30 17:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:11:50.45958388 +0000 UTC m=+757.213855696" watchObservedRunningTime="2025-09-30 17:11:50.460888815 +0000 UTC m=+757.215160651"
Sep 30 17:11:52 crc kubenswrapper[4818]: I0930 17:11:52.596328 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:11:52 crc kubenswrapper[4818]: I0930 17:11:52.596718 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:11:52 crc kubenswrapper[4818]: I0930 17:11:52.596785 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss"
Sep 30 17:11:52 crc kubenswrapper[4818]: I0930 17:11:52.597716 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f2e60af7181a017f3a998586cfb2fbfcd7d49b22c87395265f8c90eee19ee429"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Sep 30 17:11:52 crc kubenswrapper[4818]: I0930 17:11:52.597808 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://f2e60af7181a017f3a998586cfb2fbfcd7d49b22c87395265f8c90eee19ee429" gracePeriod=600
Sep 30 17:11:53 crc kubenswrapper[4818]: I0930 17:11:53.406506 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-795889b56f-gt7lt"
Sep 30 17:11:53 crc kubenswrapper[4818]: I0930 17:11:53.445981 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="f2e60af7181a017f3a998586cfb2fbfcd7d49b22c87395265f8c90eee19ee429" exitCode=0
Sep 30 17:11:53 crc kubenswrapper[4818]: I0930 17:11:53.446020 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"f2e60af7181a017f3a998586cfb2fbfcd7d49b22c87395265f8c90eee19ee429"}
Sep 30 17:11:53 crc kubenswrapper[4818]: I0930 17:11:53.446075 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"bef732d2824af20a982f56bcc38b49ae15a3f1c74de5e344956d8799c207e863"}
Sep 30 17:11:53 crc kubenswrapper[4818]: I0930 17:11:53.446096 4818 scope.go:117] "RemoveContainer" containerID="976854ec79c20d7638f4498aeb9ec7b57e80f726c0c53aeb06c9052ad1713c03"
Sep 30 17:11:57 crc kubenswrapper[4818]: I0930 17:11:57.978560 4818 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.091264 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6f59b986b5-d4h6m"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.924847 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"]
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.926108 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.938279 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-2rndc"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.938719 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.953679 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-dwbnt"]
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.956876 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.965444 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.965442 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Sep 30 17:12:13 crc kubenswrapper[4818]: I0930 17:12:13.981154 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"]
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.040759 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-s4j9s"]
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.041609 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.043374 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-8tm27"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.043580 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.047485 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.047737 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.052056 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-5d688f5ffc-46jj6"]
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.052999 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.059148 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062074 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-conf\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062145 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hqh5\" (UniqueName: \"kubernetes.io/projected/26bab40e-d8f8-478e-aafc-bbd2f7368f72-kube-api-access-5hqh5\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062170 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-sockets\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062228 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-cert\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062246 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zb77\" (UniqueName: \"kubernetes.io/projected/7e3434fb-c492-4b7d-8d60-42e4bd658f43-kube-api-access-6zb77\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062288 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-metrics-certs\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062308 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-reloader\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062324 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdcsx\" (UniqueName: \"kubernetes.io/projected/82c7a122-7887-4d75-a960-c8aa40a748f4-kube-api-access-zdcsx\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062345 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-metrics\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062372 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/26bab40e-d8f8-478e-aafc-bbd2f7368f72-metallb-excludel2\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062405 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-startup\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062420 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062445 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/82c7a122-7887-4d75-a960-c8aa40a748f4-metrics-certs\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062468 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/23b571c9-e316-4943-b334-505074c4a50e-cert\") pod \"frr-k8s-webhook-server-5478bdb765-kmldl\" (UID: \"23b571c9-e316-4943-b334-505074c4a50e\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062542 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-metrics-certs\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.062567 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vfzg\" (UniqueName: \"kubernetes.io/projected/23b571c9-e316-4943-b334-505074c4a50e-kube-api-access-4vfzg\") pod \"frr-k8s-webhook-server-5478bdb765-kmldl\" (UID: \"23b571c9-e316-4943-b334-505074c4a50e\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.063186 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5d688f5ffc-46jj6"]
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163304 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-metrics-certs\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163379 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vfzg\" (UniqueName: \"kubernetes.io/projected/23b571c9-e316-4943-b334-505074c4a50e-kube-api-access-4vfzg\") pod \"frr-k8s-webhook-server-5478bdb765-kmldl\" (UID: \"23b571c9-e316-4943-b334-505074c4a50e\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163416 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-conf\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163451 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hqh5\" (UniqueName: \"kubernetes.io/projected/26bab40e-d8f8-478e-aafc-bbd2f7368f72-kube-api-access-5hqh5\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163478 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-sockets\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163511 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-cert\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163535 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zb77\" (UniqueName: \"kubernetes.io/projected/7e3434fb-c492-4b7d-8d60-42e4bd658f43-kube-api-access-6zb77\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163564 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-metrics-certs\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163587 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdcsx\" (UniqueName: \"kubernetes.io/projected/82c7a122-7887-4d75-a960-c8aa40a748f4-kube-api-access-zdcsx\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163607 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-reloader\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163626 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-metrics\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163653 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/26bab40e-d8f8-478e-aafc-bbd2f7368f72-metallb-excludel2\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163681 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-startup\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163700 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163725 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/82c7a122-7887-4d75-a960-c8aa40a748f4-metrics-certs\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.163748 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/23b571c9-e316-4943-b334-505074c4a50e-cert\") pod \"frr-k8s-webhook-server-5478bdb765-kmldl\" (UID: \"23b571c9-e316-4943-b334-505074c4a50e\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:14 crc kubenswrapper[4818]: E0930 17:12:14.163748 4818 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found
Sep 30 17:12:14 crc kubenswrapper[4818]: E0930 17:12:14.163869 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-metrics-certs podName:7e3434fb-c492-4b7d-8d60-42e4bd658f43 nodeName:}" failed. No retries permitted until 2025-09-30 17:12:14.663847314 +0000 UTC m=+781.418119140 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-metrics-certs") pod "controller-5d688f5ffc-46jj6" (UID: "7e3434fb-c492-4b7d-8d60-42e4bd658f43") : secret "controller-certs-secret" not found
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.164026 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-conf\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.164148 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-sockets\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.164449 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-reloader\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: E0930 17:12:14.164600 4818 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Sep 30 17:12:14 crc kubenswrapper[4818]: E0930 17:12:14.164655 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist podName:26bab40e-d8f8-478e-aafc-bbd2f7368f72 nodeName:}" failed. No retries permitted until 2025-09-30 17:12:14.664635956 +0000 UTC m=+781.418907772 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist") pod "speaker-s4j9s" (UID: "26bab40e-d8f8-478e-aafc-bbd2f7368f72") : secret "metallb-memberlist" not found
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.164676 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/82c7a122-7887-4d75-a960-c8aa40a748f4-metrics\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.164802 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/26bab40e-d8f8-478e-aafc-bbd2f7368f72-metallb-excludel2\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.165071 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/82c7a122-7887-4d75-a960-c8aa40a748f4-frr-startup\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.167139 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.167241 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.172621 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/82c7a122-7887-4d75-a960-c8aa40a748f4-metrics-certs\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.177612 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-cert\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.178290 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-metrics-certs\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.180571 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/23b571c9-e316-4943-b334-505074c4a50e-cert\") pod \"frr-k8s-webhook-server-5478bdb765-kmldl\" (UID: \"23b571c9-e316-4943-b334-505074c4a50e\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.183604 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vfzg\" (UniqueName: \"kubernetes.io/projected/23b571c9-e316-4943-b334-505074c4a50e-kube-api-access-4vfzg\") pod \"frr-k8s-webhook-server-5478bdb765-kmldl\" (UID: \"23b571c9-e316-4943-b334-505074c4a50e\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.184710 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdcsx\" (UniqueName: \"kubernetes.io/projected/82c7a122-7887-4d75-a960-c8aa40a748f4-kube-api-access-zdcsx\") pod \"frr-k8s-dwbnt\" (UID: \"82c7a122-7887-4d75-a960-c8aa40a748f4\") " pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.184839 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hqh5\" (UniqueName: \"kubernetes.io/projected/26bab40e-d8f8-478e-aafc-bbd2f7368f72-kube-api-access-5hqh5\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.186143 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zb77\" (UniqueName: \"kubernetes.io/projected/7e3434fb-c492-4b7d-8d60-42e4bd658f43-kube-api-access-6zb77\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.255614 4818 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-2rndc"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.264608 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.279246 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.589234 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerStarted","Data":"cbb907cc9aaba61019688ee100b54937e9a9ece3ec117a62102cf05ec4f88259"}
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.669093 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-metrics-certs\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.669224 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:14 crc kubenswrapper[4818]: E0930 17:12:14.669549 4818 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Sep 30 17:12:14 crc kubenswrapper[4818]: E0930 17:12:14.669647 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist podName:26bab40e-d8f8-478e-aafc-bbd2f7368f72 nodeName:}" failed. No retries permitted until 2025-09-30 17:12:15.669620487 +0000 UTC m=+782.423892343 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist") pod "speaker-s4j9s" (UID: "26bab40e-d8f8-478e-aafc-bbd2f7368f72") : secret "metallb-memberlist" not found
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.678035 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7e3434fb-c492-4b7d-8d60-42e4bd658f43-metrics-certs\") pod \"controller-5d688f5ffc-46jj6\" (UID: \"7e3434fb-c492-4b7d-8d60-42e4bd658f43\") " pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.683436 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:14 crc kubenswrapper[4818]: I0930 17:12:14.704070 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"]
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.166184 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5d688f5ffc-46jj6"]
Sep 30 17:12:15 crc kubenswrapper[4818]: W0930 17:12:15.175359 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e3434fb_c492_4b7d_8d60_42e4bd658f43.slice/crio-4a734eec5322ab907db83dbfa004eaf906e31a96a2fc8eea145f61703ae5c414 WatchSource:0}: Error finding container 4a734eec5322ab907db83dbfa004eaf906e31a96a2fc8eea145f61703ae5c414: Status 404 returned error can't find the container with id 4a734eec5322ab907db83dbfa004eaf906e31a96a2fc8eea145f61703ae5c414
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.596324 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl" event={"ID":"23b571c9-e316-4943-b334-505074c4a50e","Type":"ContainerStarted","Data":"06eb028bfc6180f5701317ef0b443dc15f9c9c731f757b5d8ec9d033cb81b812"}
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.598166 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5d688f5ffc-46jj6" event={"ID":"7e3434fb-c492-4b7d-8d60-42e4bd658f43","Type":"ContainerStarted","Data":"118f0ec67c351fedcfb936f4b4d3ada535be220245c7474e37928bc59e6e8bd8"}
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.598196 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5d688f5ffc-46jj6" event={"ID":"7e3434fb-c492-4b7d-8d60-42e4bd658f43","Type":"ContainerStarted","Data":"b39961080e9054650ad38ad7bbb4d30645291dbb22fe4144f3876174ccff51fc"}
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.598206 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5d688f5ffc-46jj6" event={"ID":"7e3434fb-c492-4b7d-8d60-42e4bd658f43","Type":"ContainerStarted","Data":"4a734eec5322ab907db83dbfa004eaf906e31a96a2fc8eea145f61703ae5c414"}
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.599211 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-5d688f5ffc-46jj6"
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.682877 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.699661 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26bab40e-d8f8-478e-aafc-bbd2f7368f72-memberlist\") pod \"speaker-s4j9s\" (UID: \"26bab40e-d8f8-478e-aafc-bbd2f7368f72\") " pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:15 crc kubenswrapper[4818]: I0930 17:12:15.863295 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:15 crc kubenswrapper[4818]: W0930 17:12:15.884519 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26bab40e_d8f8_478e_aafc_bbd2f7368f72.slice/crio-791bc77603560cd4c86c12fedc82d07173c9c8b34399ee076ccce0c05b83715b WatchSource:0}: Error finding container 791bc77603560cd4c86c12fedc82d07173c9c8b34399ee076ccce0c05b83715b: Status 404 returned error can't find the container with id 791bc77603560cd4c86c12fedc82d07173c9c8b34399ee076ccce0c05b83715b
Sep 30 17:12:16 crc kubenswrapper[4818]: I0930 17:12:16.609414 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-s4j9s" event={"ID":"26bab40e-d8f8-478e-aafc-bbd2f7368f72","Type":"ContainerStarted","Data":"39669ebe2ebabc50b67c7db2da2017b94690af5db076770517b4683cef4db7b4"}
Sep 30 17:12:16 crc kubenswrapper[4818]: I0930 17:12:16.609462 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-s4j9s" event={"ID":"26bab40e-d8f8-478e-aafc-bbd2f7368f72","Type":"ContainerStarted","Data":"f8d88fe5467e8b52160b51a73a6eace31a2f440e16bff4f67772fa01db3d9e55"}
Sep 30 17:12:16 crc kubenswrapper[4818]: I0930 17:12:16.609472 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-s4j9s" event={"ID":"26bab40e-d8f8-478e-aafc-bbd2f7368f72","Type":"ContainerStarted","Data":"791bc77603560cd4c86c12fedc82d07173c9c8b34399ee076ccce0c05b83715b"}
Sep 30 17:12:16 crc kubenswrapper[4818]: I0930 17:12:16.610363 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-s4j9s"
Sep 30 17:12:16 crc kubenswrapper[4818]: I0930 17:12:16.627752 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-5d688f5ffc-46jj6" podStartSLOduration=2.627735723 podStartE2EDuration="2.627735723s" podCreationTimestamp="2025-09-30 17:12:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:12:15.619382464 +0000 UTC m=+782.373654310" watchObservedRunningTime="2025-09-30 17:12:16.627735723 +0000 UTC m=+783.382007539"
Sep 30 17:12:16 crc kubenswrapper[4818]: I0930 17:12:16.630295 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-s4j9s" podStartSLOduration=2.630286812 podStartE2EDuration="2.630286812s" podCreationTimestamp="2025-09-30 17:12:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:12:16.628119673 +0000 UTC m=+783.382391499" watchObservedRunningTime="2025-09-30 17:12:16.630286812 +0000 UTC m=+783.384558628"
Sep 30 17:12:22 crc kubenswrapper[4818]: I0930 17:12:22.651587 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl" event={"ID":"23b571c9-e316-4943-b334-505074c4a50e","Type":"ContainerStarted","Data":"9fdc9393e0467487656a48d407cb6b77918885529f84dd2d49de191ac15d45e7"}
Sep 30 17:12:22 crc kubenswrapper[4818]: I0930 17:12:22.652224 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl"
Sep 30 17:12:22 crc kubenswrapper[4818]: I0930 17:12:22.654403 4818 generic.go:334] "Generic (PLEG): container finished" podID="82c7a122-7887-4d75-a960-c8aa40a748f4" containerID="72467a713d264ed22c47577e454889dec0cacc1c2346e8580421e88ae673d68c" exitCode=0
Sep 30 17:12:22 crc kubenswrapper[4818]: I0930 17:12:22.654453 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerDied","Data":"72467a713d264ed22c47577e454889dec0cacc1c2346e8580421e88ae673d68c"}
Sep 30 17:12:22 crc kubenswrapper[4818]: I0930 17:12:22.674487 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl" podStartSLOduration=2.52806037 podStartE2EDuration="9.674463001s" podCreationTimestamp="2025-09-30 17:12:13 +0000 UTC" firstStartedPulling="2025-09-30 17:12:14.718117115 +0000 UTC m=+781.472388981" lastFinishedPulling="2025-09-30 17:12:21.864519756 +0000 UTC m=+788.618791612" observedRunningTime="2025-09-30 17:12:22.673241779 +0000 UTC m=+789.427513635" watchObservedRunningTime="2025-09-30 17:12:22.674463001 +0000 UTC m=+789.428734847"
Sep 30 17:12:23 crc kubenswrapper[4818]: I0930 17:12:23.666557 4818 generic.go:334] "Generic (PLEG): container finished" podID="82c7a122-7887-4d75-a960-c8aa40a748f4" containerID="e7d4a80f03222c5f7190c91c5a7982131f15a40ac6e87e6e5e6d15646acc0a35" exitCode=0
Sep 30 17:12:23 crc kubenswrapper[4818]: I0930 17:12:23.666663 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerDied","Data":"e7d4a80f03222c5f7190c91c5a7982131f15a40ac6e87e6e5e6d15646acc0a35"}
Sep 30 17:12:24 crc kubenswrapper[4818]: I0930 17:12:24.674779 4818 generic.go:334] "Generic (PLEG): container finished" podID="82c7a122-7887-4d75-a960-c8aa40a748f4" containerID="574cd300a0e76d766ad50ba30aadeb4742f7ea4cb40dfbf284ba58ed91c03c94" exitCode=0
Sep 30 17:12:24 crc kubenswrapper[4818]: I0930 17:12:24.674857 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerDied","Data":"574cd300a0e76d766ad50ba30aadeb4742f7ea4cb40dfbf284ba58ed91c03c94"}
Sep 30 17:12:25 crc kubenswrapper[4818]: I0930 17:12:25.686286 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerStarted","Data":"7cc66c19b319e8b567b963cf6be2fd6284f21ece34da8ef0d93f02e59fda3bc3"}
Sep 30 17:12:25 crc kubenswrapper[4818]: I0930 17:12:25.686594 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerStarted","Data":"87b1b535414b4dd5d4c5d1e23eef4a95d544871de2b3b750b046e712cb220939"}
Sep 30 17:12:25 crc kubenswrapper[4818]: I0930 17:12:25.686607 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerStarted","Data":"0864e7cc78e2c23bf4923bd14c52309c849f0d410c9457886ec6de1c9e1eca1a"}
Sep 30 17:12:25 crc kubenswrapper[4818]: I0930 17:12:25.686617 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerStarted","Data":"3f322d9001083f19a1aafe16d69717b0e156cdd5d07bc712773c9170dd94644c"}
Sep 30 17:12:25 crc kubenswrapper[4818]: I0930 17:12:25.686628 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerStarted","Data":"dbe17d30df8d155621242a26e9b0b29ccb9cd8796648bf544b39608f1c8b6486"}
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.126279 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zwdvz"]
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.127579 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.142971 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zwdvz"]
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.165382 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-catalog-content\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.165426 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-utilities\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.165505 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnkxf\" (UniqueName: \"kubernetes.io/projected/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-kube-api-access-pnkxf\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.267167 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnkxf\" (UniqueName: \"kubernetes.io/projected/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-kube-api-access-pnkxf\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.267275 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-catalog-content\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.267297 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-utilities\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.267787 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-catalog-content\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.267878 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-utilities\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.290092 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnkxf\" (UniqueName: \"kubernetes.io/projected/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-kube-api-access-pnkxf\") pod \"redhat-operators-zwdvz\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.443200 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zwdvz"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.700281 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dwbnt" event={"ID":"82c7a122-7887-4d75-a960-c8aa40a748f4","Type":"ContainerStarted","Data":"83871d5b418e5364185ce872905ce88c98ef75c67bd37c40fdcad41a80ac6192"}
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.703100 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.724977 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-dwbnt" podStartSLOduration=6.293568838 podStartE2EDuration="13.724960416s" podCreationTimestamp="2025-09-30 17:12:13 +0000 UTC" firstStartedPulling="2025-09-30 17:12:14.458133212 +0000 UTC m=+781.212405028" lastFinishedPulling="2025-09-30 17:12:21.88952479 +0000 UTC m=+788.643796606" observedRunningTime="2025-09-30 17:12:26.721945225 +0000 UTC m=+793.476217041" watchObservedRunningTime="2025-09-30 17:12:26.724960416 +0000 UTC m=+793.479232232"
Sep 30 17:12:26 crc kubenswrapper[4818]: I0930 17:12:26.897779 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zwdvz"]
Sep 30 17:12:26 crc kubenswrapper[4818]: W0930 17:12:26.905135 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda734e907_59ba_40b2_9b2a_5ba7a7c6dacb.slice/crio-e4eede49f2c6ad22399e9c923c34b3d73198ab9f148ed35dac52c5d92308d189 WatchSource:0}: Error finding container e4eede49f2c6ad22399e9c923c34b3d73198ab9f148ed35dac52c5d92308d189: Status 404 returned error can't find the container with id e4eede49f2c6ad22399e9c923c34b3d73198ab9f148ed35dac52c5d92308d189
Sep 30 17:12:27 crc kubenswrapper[4818]: I0930 17:12:27.715116 4818 generic.go:334] "Generic (PLEG): container finished" podID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerID="f4e1057a4a29c32f4e502ab353356742478a9b76d5f1bdc5c4fca4b158262675" exitCode=0
Sep 30 17:12:27 crc kubenswrapper[4818]: I0930 17:12:27.715419 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zwdvz" event={"ID":"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb","Type":"ContainerDied","Data":"f4e1057a4a29c32f4e502ab353356742478a9b76d5f1bdc5c4fca4b158262675"}
Sep 30 17:12:27 crc kubenswrapper[4818]: I0930 17:12:27.715489 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zwdvz" event={"ID":"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb","Type":"ContainerStarted","Data":"e4eede49f2c6ad22399e9c923c34b3d73198ab9f148ed35dac52c5d92308d189"}
Sep 30 17:12:29 crc kubenswrapper[4818]: I0930 17:12:29.280581 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:29 crc kubenswrapper[4818]: I0930 17:12:29.343776 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-dwbnt"
Sep 30 17:12:29 crc kubenswrapper[4818]: I0930 17:12:29.734722 4818 generic.go:334] "Generic (PLEG): container finished" podID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerID="3c15b844456fb9948812ec86cf270ed5cdb4c35cc628d434dd6cbf0f455cddf3" exitCode=0
Sep 30 17:12:29 crc kubenswrapper[4818]: I0930 17:12:29.734762 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zwdvz" event={"ID":"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb","Type":"ContainerDied","Data":"3c15b844456fb9948812ec86cf270ed5cdb4c35cc628d434dd6cbf0f455cddf3"}
Sep 30 17:12:31 crc kubenswrapper[4818]: I0930 17:12:31.750732 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zwdvz" event={"ID":"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb","Type":"ContainerStarted","Data":"c7ec8c618d208ede2def7c21be6607beb82482767cbc0e23c765d16474497e77"}
Sep 30 17:12:31 crc kubenswrapper[4818]: I0930 17:12:31.776688 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zwdvz" podStartSLOduration=2.865866494 podStartE2EDuration="5.776659107s" podCreationTimestamp="2025-09-30 17:12:26 +0000 UTC" firstStartedPulling="2025-09-30 17:12:27.718848385 +0000 UTC m=+794.473120241" lastFinishedPulling="2025-09-30 17:12:30.629640998 +0000 UTC m=+797.383912854" observedRunningTime="2025-09-30 17:12:31.770694716 +0000 UTC m=+798.524966572" watchObservedRunningTime="2025-09-30 17:12:31.776659107 +0000 UTC m=+798.530930943"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.823477 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bbxjj"]
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.826709 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.847368 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bbxjj"]
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.869311 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcng2\" (UniqueName: \"kubernetes.io/projected/d8d214a7-d559-4d43-a6e7-3e81d6988d69-kube-api-access-rcng2\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.869435 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-catalog-content\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.869534 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-utilities\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.971095 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-catalog-content\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.971161 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-utilities\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.971215 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcng2\" (UniqueName: \"kubernetes.io/projected/d8d214a7-d559-4d43-a6e7-3e81d6988d69-kube-api-access-rcng2\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.971579 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-catalog-content\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.971668 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-utilities\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj"
Sep 30 17:12:32 crc kubenswrapper[4818]: I0930 17:12:32.999489 4818 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-rcng2\" (UniqueName: \"kubernetes.io/projected/d8d214a7-d559-4d43-a6e7-3e81d6988d69-kube-api-access-rcng2\") pod \"community-operators-bbxjj\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:33 crc kubenswrapper[4818]: I0930 17:12:33.151258 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:33 crc kubenswrapper[4818]: I0930 17:12:33.671656 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bbxjj"] Sep 30 17:12:33 crc kubenswrapper[4818]: I0930 17:12:33.766436 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbxjj" event={"ID":"d8d214a7-d559-4d43-a6e7-3e81d6988d69","Type":"ContainerStarted","Data":"74dc84891628a89d7f37edea6377653dcd309af2bd2b5766df5969bcb5d7d301"} Sep 30 17:12:34 crc kubenswrapper[4818]: I0930 17:12:34.270269 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-kmldl" Sep 30 17:12:34 crc kubenswrapper[4818]: I0930 17:12:34.282762 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-dwbnt" Sep 30 17:12:34 crc kubenswrapper[4818]: I0930 17:12:34.688285 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-5d688f5ffc-46jj6" Sep 30 17:12:34 crc kubenswrapper[4818]: I0930 17:12:34.776458 4818 generic.go:334] "Generic (PLEG): container finished" podID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerID="f77831add04241f2d1091e59f9a37640c3b2174303687878fe9a6a025b6a2230" exitCode=0 Sep 30 17:12:34 crc kubenswrapper[4818]: I0930 17:12:34.776533 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbxjj" event={"ID":"d8d214a7-d559-4d43-a6e7-3e81d6988d69","Type":"ContainerDied","Data":"f77831add04241f2d1091e59f9a37640c3b2174303687878fe9a6a025b6a2230"} Sep 30 17:12:35 crc kubenswrapper[4818]: I0930 17:12:35.869484 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-s4j9s" Sep 30 17:12:36 crc kubenswrapper[4818]: I0930 17:12:36.444955 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zwdvz" Sep 30 17:12:36 crc kubenswrapper[4818]: I0930 17:12:36.445410 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zwdvz" Sep 30 17:12:36 crc kubenswrapper[4818]: I0930 17:12:36.532950 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zwdvz" Sep 30 17:12:36 crc kubenswrapper[4818]: I0930 17:12:36.789798 4818 generic.go:334] "Generic (PLEG): container finished" podID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerID="d0534149d2cc0f8ea7c5bf7bf1ddb40128392d9ec7568e160481ff7b5640f927" exitCode=0 Sep 30 17:12:36 crc kubenswrapper[4818]: I0930 17:12:36.790059 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbxjj" event={"ID":"d8d214a7-d559-4d43-a6e7-3e81d6988d69","Type":"ContainerDied","Data":"d0534149d2cc0f8ea7c5bf7bf1ddb40128392d9ec7568e160481ff7b5640f927"} Sep 30 17:12:36 crc kubenswrapper[4818]: I0930 17:12:36.839359 4818 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zwdvz" Sep 30 17:12:37 crc kubenswrapper[4818]: I0930 17:12:37.798506 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbxjj" event={"ID":"d8d214a7-d559-4d43-a6e7-3e81d6988d69","Type":"ContainerStarted","Data":"74036aba131411550185a109a6a3315de47fc3cbdc84aeedcd72052354c03582"} Sep 30 17:12:37 crc kubenswrapper[4818]: I0930 17:12:37.819578 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bbxjj" podStartSLOduration=3.172020431 podStartE2EDuration="5.819562883s" podCreationTimestamp="2025-09-30 17:12:32 +0000 UTC" firstStartedPulling="2025-09-30 17:12:34.778730762 +0000 UTC m=+801.533002618" lastFinishedPulling="2025-09-30 17:12:37.426273234 +0000 UTC m=+804.180545070" observedRunningTime="2025-09-30 17:12:37.815139223 +0000 UTC m=+804.569411039" watchObservedRunningTime="2025-09-30 17:12:37.819562883 +0000 UTC m=+804.573834699" Sep 30 17:12:37 crc kubenswrapper[4818]: I0930 17:12:37.929456 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6"] Sep 30 17:12:37 crc kubenswrapper[4818]: I0930 17:12:37.930591 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:37 crc kubenswrapper[4818]: I0930 17:12:37.932915 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Sep 30 17:12:37 crc kubenswrapper[4818]: I0930 17:12:37.944688 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6"] Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.057320 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.057394 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.057446 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b9n7\" (UniqueName: \"kubernetes.io/projected/2b142dea-f8b9-4930-b066-64dd84db0dd5-kube-api-access-6b9n7\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.158914 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.159021 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.159165 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b9n7\" (UniqueName: \"kubernetes.io/projected/2b142dea-f8b9-4930-b066-64dd84db0dd5-kube-api-access-6b9n7\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.159484 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.159940 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.198626 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b9n7\" (UniqueName: \"kubernetes.io/projected/2b142dea-f8b9-4930-b066-64dd84db0dd5-kube-api-access-6b9n7\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.244311 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.714769 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6"] Sep 30 17:12:38 crc kubenswrapper[4818]: W0930 17:12:38.719883 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b142dea_f8b9_4930_b066_64dd84db0dd5.slice/crio-71f09f59c3035b4f74cea3e67a644b751a945e354220680691d927853f5e90b1 WatchSource:0}: Error finding container 71f09f59c3035b4f74cea3e67a644b751a945e354220680691d927853f5e90b1: Status 404 returned error can't find the container with id 71f09f59c3035b4f74cea3e67a644b751a945e354220680691d927853f5e90b1 Sep 30 17:12:38 crc kubenswrapper[4818]: I0930 17:12:38.805605 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" event={"ID":"2b142dea-f8b9-4930-b066-64dd84db0dd5","Type":"ContainerStarted","Data":"71f09f59c3035b4f74cea3e67a644b751a945e354220680691d927853f5e90b1"} Sep 30 17:12:39 crc kubenswrapper[4818]: I0930 17:12:39.274432 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zwdvz"] Sep 30 17:12:39 crc kubenswrapper[4818]: I0930 17:12:39.813428 4818 generic.go:334] "Generic (PLEG): container finished" podID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerID="783fd9726b69a447834e52b34822867d40e1e5966b82b0b8cbeb6dda19bfe371" exitCode=0 Sep 30 17:12:39 crc kubenswrapper[4818]: I0930 17:12:39.813553 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" event={"ID":"2b142dea-f8b9-4930-b066-64dd84db0dd5","Type":"ContainerDied","Data":"783fd9726b69a447834e52b34822867d40e1e5966b82b0b8cbeb6dda19bfe371"} Sep 30 17:12:39 crc kubenswrapper[4818]: I0930 17:12:39.813833 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zwdvz" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="registry-server" containerID="cri-o://c7ec8c618d208ede2def7c21be6607beb82482767cbc0e23c765d16474497e77" gracePeriod=2 Sep 30 17:12:40 crc kubenswrapper[4818]: I0930 17:12:40.824888 4818 generic.go:334] "Generic (PLEG): container finished" podID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerID="c7ec8c618d208ede2def7c21be6607beb82482767cbc0e23c765d16474497e77" exitCode=0 Sep 30 17:12:40 crc kubenswrapper[4818]: I0930 17:12:40.825178 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zwdvz" event={"ID":"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb","Type":"ContainerDied","Data":"c7ec8c618d208ede2def7c21be6607beb82482767cbc0e23c765d16474497e77"} Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.662004 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zwdvz" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.811003 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-catalog-content\") pod \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.811569 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-utilities\") pod \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.811916 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnkxf\" (UniqueName: \"kubernetes.io/projected/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-kube-api-access-pnkxf\") pod \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\" (UID: \"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb\") " Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.813549 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-utilities" (OuterVolumeSpecName: "utilities") pod "a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" (UID: "a734e907-59ba-40b2-9b2a-5ba7a7c6dacb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.824239 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-kube-api-access-pnkxf" (OuterVolumeSpecName: "kube-api-access-pnkxf") pod "a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" (UID: "a734e907-59ba-40b2-9b2a-5ba7a7c6dacb"). InnerVolumeSpecName "kube-api-access-pnkxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.844092 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zwdvz" event={"ID":"a734e907-59ba-40b2-9b2a-5ba7a7c6dacb","Type":"ContainerDied","Data":"e4eede49f2c6ad22399e9c923c34b3d73198ab9f148ed35dac52c5d92308d189"} Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.844108 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zwdvz" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.845360 4818 scope.go:117] "RemoveContainer" containerID="c7ec8c618d208ede2def7c21be6607beb82482767cbc0e23c765d16474497e77" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.873304 4818 scope.go:117] "RemoveContainer" containerID="3c15b844456fb9948812ec86cf270ed5cdb4c35cc628d434dd6cbf0f455cddf3" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.915100 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.915225 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnkxf\" (UniqueName: \"kubernetes.io/projected/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-kube-api-access-pnkxf\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.921908 4818 scope.go:117] "RemoveContainer" containerID="f4e1057a4a29c32f4e502ab353356742478a9b76d5f1bdc5c4fca4b158262675" Sep 30 17:12:41 crc kubenswrapper[4818]: I0930 17:12:41.928302 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" (UID: "a734e907-59ba-40b2-9b2a-5ba7a7c6dacb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:12:42 crc kubenswrapper[4818]: I0930 17:12:42.016142 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:42 crc kubenswrapper[4818]: I0930 17:12:42.185447 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zwdvz"] Sep 30 17:12:42 crc kubenswrapper[4818]: I0930 17:12:42.189965 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zwdvz"] Sep 30 17:12:43 crc kubenswrapper[4818]: I0930 17:12:43.157115 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:43 crc kubenswrapper[4818]: I0930 17:12:43.157359 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:43 crc kubenswrapper[4818]: I0930 17:12:43.205549 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:43 crc kubenswrapper[4818]: I0930 17:12:43.908421 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:44 crc kubenswrapper[4818]: I0930 17:12:44.029564 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" path="/var/lib/kubelet/pods/a734e907-59ba-40b2-9b2a-5ba7a7c6dacb/volumes" Sep 30 17:12:44 crc kubenswrapper[4818]: I0930 17:12:44.874545 4818 generic.go:334] "Generic (PLEG): container finished" podID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerID="c11f231ccb126094b0acb16ffc1232743cff466235ac0dc249b43ced38dd0393" exitCode=0 Sep 30 17:12:44 crc kubenswrapper[4818]: I0930 17:12:44.874823 4818 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" event={"ID":"2b142dea-f8b9-4930-b066-64dd84db0dd5","Type":"ContainerDied","Data":"c11f231ccb126094b0acb16ffc1232743cff466235ac0dc249b43ced38dd0393"} Sep 30 17:12:45 crc kubenswrapper[4818]: I0930 17:12:45.887433 4818 generic.go:334] "Generic (PLEG): container finished" podID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerID="34eb86dc0210de537fc51b7fd65cdbf86718b944e6ff2379bf6db8a6624a2461" exitCode=0 Sep 30 17:12:45 crc kubenswrapper[4818]: I0930 17:12:45.887557 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" event={"ID":"2b142dea-f8b9-4930-b066-64dd84db0dd5","Type":"ContainerDied","Data":"34eb86dc0210de537fc51b7fd65cdbf86718b944e6ff2379bf6db8a6624a2461"} Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.689619 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nzt4h"] Sep 30 17:12:46 crc kubenswrapper[4818]: E0930 17:12:46.690136 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="registry-server" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.690179 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="registry-server" Sep 30 17:12:46 crc kubenswrapper[4818]: E0930 17:12:46.690226 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="extract-content" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.690245 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="extract-content" Sep 30 17:12:46 crc kubenswrapper[4818]: E0930 17:12:46.690282 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="extract-utilities" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.690303 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="extract-utilities" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.690617 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="a734e907-59ba-40b2-9b2a-5ba7a7c6dacb" containerName="registry-server" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.692662 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.700202 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nzt4h"] Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.784812 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-catalog-content\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.785001 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdsn4\" (UniqueName: \"kubernetes.io/projected/33dec9db-5f6e-421d-a467-fa00eab01855-kube-api-access-kdsn4\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.785090 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-utilities\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.886955 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-catalog-content\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.887326 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdsn4\" (UniqueName: \"kubernetes.io/projected/33dec9db-5f6e-421d-a467-fa00eab01855-kube-api-access-kdsn4\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.887497 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-utilities\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.887655 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-catalog-content\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.887814 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-utilities\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:46 crc kubenswrapper[4818]: I0930 17:12:46.909907 4818 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-kdsn4\" (UniqueName: \"kubernetes.io/projected/33dec9db-5f6e-421d-a467-fa00eab01855-kube-api-access-kdsn4\") pod \"redhat-marketplace-nzt4h\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.013434 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.217774 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.393743 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6b9n7\" (UniqueName: \"kubernetes.io/projected/2b142dea-f8b9-4930-b066-64dd84db0dd5-kube-api-access-6b9n7\") pod \"2b142dea-f8b9-4930-b066-64dd84db0dd5\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.394129 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-util\") pod \"2b142dea-f8b9-4930-b066-64dd84db0dd5\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.394207 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-bundle\") pod \"2b142dea-f8b9-4930-b066-64dd84db0dd5\" (UID: \"2b142dea-f8b9-4930-b066-64dd84db0dd5\") " Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.394999 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-bundle" (OuterVolumeSpecName: "bundle") pod "2b142dea-f8b9-4930-b066-64dd84db0dd5" (UID: "2b142dea-f8b9-4930-b066-64dd84db0dd5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.396875 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b142dea-f8b9-4930-b066-64dd84db0dd5-kube-api-access-6b9n7" (OuterVolumeSpecName: "kube-api-access-6b9n7") pod "2b142dea-f8b9-4930-b066-64dd84db0dd5" (UID: "2b142dea-f8b9-4930-b066-64dd84db0dd5"). InnerVolumeSpecName "kube-api-access-6b9n7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.403896 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-util" (OuterVolumeSpecName: "util") pod "2b142dea-f8b9-4930-b066-64dd84db0dd5" (UID: "2b142dea-f8b9-4930-b066-64dd84db0dd5"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.463914 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nzt4h"] Sep 30 17:12:47 crc kubenswrapper[4818]: W0930 17:12:47.470617 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33dec9db_5f6e_421d_a467_fa00eab01855.slice/crio-a7519d34b7dfa3c8710b65d435d637fa42f3d1f8f516191141ac157d19cd3705 WatchSource:0}: Error finding container a7519d34b7dfa3c8710b65d435d637fa42f3d1f8f516191141ac157d19cd3705: Status 404 returned error can't find the container with id a7519d34b7dfa3c8710b65d435d637fa42f3d1f8f516191141ac157d19cd3705 Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.495886 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6b9n7\" (UniqueName: \"kubernetes.io/projected/2b142dea-f8b9-4930-b066-64dd84db0dd5-kube-api-access-6b9n7\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.495913 4818 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-util\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.495942 4818 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b142dea-f8b9-4930-b066-64dd84db0dd5-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.901863 4818 generic.go:334] "Generic (PLEG): container finished" podID="33dec9db-5f6e-421d-a467-fa00eab01855" containerID="57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091" exitCode=0 Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.901914 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nzt4h" event={"ID":"33dec9db-5f6e-421d-a467-fa00eab01855","Type":"ContainerDied","Data":"57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091"} Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.902003 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nzt4h" event={"ID":"33dec9db-5f6e-421d-a467-fa00eab01855","Type":"ContainerStarted","Data":"a7519d34b7dfa3c8710b65d435d637fa42f3d1f8f516191141ac157d19cd3705"} Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.904633 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" event={"ID":"2b142dea-f8b9-4930-b066-64dd84db0dd5","Type":"ContainerDied","Data":"71f09f59c3035b4f74cea3e67a644b751a945e354220680691d927853f5e90b1"} Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.904670 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71f09f59c3035b4f74cea3e67a644b751a945e354220680691d927853f5e90b1" Sep 30 17:12:47 crc kubenswrapper[4818]: I0930 17:12:47.904771 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6" Sep 30 17:12:48 crc kubenswrapper[4818]: I0930 17:12:48.675200 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bbxjj"] Sep 30 17:12:48 crc kubenswrapper[4818]: I0930 17:12:48.675840 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bbxjj" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="registry-server" containerID="cri-o://74036aba131411550185a109a6a3315de47fc3cbdc84aeedcd72052354c03582" gracePeriod=2 Sep 30 17:12:48 crc kubenswrapper[4818]: I0930 17:12:48.916357 4818 generic.go:334] "Generic (PLEG): container finished" podID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerID="74036aba131411550185a109a6a3315de47fc3cbdc84aeedcd72052354c03582" exitCode=0 Sep 30 17:12:48 crc kubenswrapper[4818]: I0930 17:12:48.916418 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbxjj" event={"ID":"d8d214a7-d559-4d43-a6e7-3e81d6988d69","Type":"ContainerDied","Data":"74036aba131411550185a109a6a3315de47fc3cbdc84aeedcd72052354c03582"} Sep 30 17:12:48 crc kubenswrapper[4818]: I0930 17:12:48.919075 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nzt4h" event={"ID":"33dec9db-5f6e-421d-a467-fa00eab01855","Type":"ContainerStarted","Data":"74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7"} Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.089612 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.222343 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-catalog-content\") pod \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.222775 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcng2\" (UniqueName: \"kubernetes.io/projected/d8d214a7-d559-4d43-a6e7-3e81d6988d69-kube-api-access-rcng2\") pod \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.222846 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-utilities\") pod \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\" (UID: \"d8d214a7-d559-4d43-a6e7-3e81d6988d69\") " Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.223706 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-utilities" (OuterVolumeSpecName: "utilities") pod "d8d214a7-d559-4d43-a6e7-3e81d6988d69" (UID: "d8d214a7-d559-4d43-a6e7-3e81d6988d69"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.228361 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8d214a7-d559-4d43-a6e7-3e81d6988d69-kube-api-access-rcng2" (OuterVolumeSpecName: "kube-api-access-rcng2") pod "d8d214a7-d559-4d43-a6e7-3e81d6988d69" (UID: "d8d214a7-d559-4d43-a6e7-3e81d6988d69"). InnerVolumeSpecName "kube-api-access-rcng2". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.264947 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8d214a7-d559-4d43-a6e7-3e81d6988d69" (UID: "d8d214a7-d559-4d43-a6e7-3e81d6988d69"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.323962 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.323996 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcng2\" (UniqueName: \"kubernetes.io/projected/d8d214a7-d559-4d43-a6e7-3e81d6988d69-kube-api-access-rcng2\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.324012 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8d214a7-d559-4d43-a6e7-3e81d6988d69-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.942140 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbxjj" event={"ID":"d8d214a7-d559-4d43-a6e7-3e81d6988d69","Type":"ContainerDied","Data":"74dc84891628a89d7f37edea6377653dcd309af2bd2b5766df5969bcb5d7d301"} Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.942224 4818 scope.go:117] "RemoveContainer" containerID="74036aba131411550185a109a6a3315de47fc3cbdc84aeedcd72052354c03582" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.942160 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bbxjj" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.946262 4818 generic.go:334] "Generic (PLEG): container finished" podID="33dec9db-5f6e-421d-a467-fa00eab01855" containerID="74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7" exitCode=0 Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.946329 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nzt4h" event={"ID":"33dec9db-5f6e-421d-a467-fa00eab01855","Type":"ContainerDied","Data":"74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7"} Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.967516 4818 scope.go:117] "RemoveContainer" containerID="d0534149d2cc0f8ea7c5bf7bf1ddb40128392d9ec7568e160481ff7b5640f927" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.994518 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bbxjj"] Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.995151 4818 scope.go:117] "RemoveContainer" containerID="f77831add04241f2d1091e59f9a37640c3b2174303687878fe9a6a025b6a2230" Sep 30 17:12:49 crc kubenswrapper[4818]: I0930 17:12:49.998945 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bbxjj"] Sep 30 17:12:50 crc kubenswrapper[4818]: I0930 17:12:50.031140 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" path="/var/lib/kubelet/pods/d8d214a7-d559-4d43-a6e7-3e81d6988d69/volumes" Sep 30 17:12:50 crc kubenswrapper[4818]: I0930 17:12:50.964719 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nzt4h" event={"ID":"33dec9db-5f6e-421d-a467-fa00eab01855","Type":"ContainerStarted","Data":"ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3"} Sep 30 17:12:50 crc kubenswrapper[4818]: I0930 17:12:50.994327 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nzt4h" podStartSLOduration=2.5192930369999997 podStartE2EDuration="4.994306417s" podCreationTimestamp="2025-09-30 17:12:46 +0000 UTC" firstStartedPulling="2025-09-30 17:12:47.903786616 +0000 UTC m=+814.658058472" lastFinishedPulling="2025-09-30 17:12:50.378799996 +0000 UTC m=+817.133071852" observedRunningTime="2025-09-30 17:12:50.991406559 +0000 UTC m=+817.745678385" watchObservedRunningTime="2025-09-30 17:12:50.994306417 +0000 UTC m=+817.748578233" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261165 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4"] Sep 30 17:12:53 crc kubenswrapper[4818]: E0930 17:12:53.261530 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="registry-server" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261549 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="registry-server" Sep 30 17:12:53 crc kubenswrapper[4818]: E0930 17:12:53.261568 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerName="extract" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261579 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerName="extract" Sep 30 
17:12:53 crc kubenswrapper[4818]: E0930 17:12:53.261596 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="extract-content" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261609 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="extract-content" Sep 30 17:12:53 crc kubenswrapper[4818]: E0930 17:12:53.261628 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="extract-utilities" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261640 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="extract-utilities" Sep 30 17:12:53 crc kubenswrapper[4818]: E0930 17:12:53.261664 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerName="pull" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261674 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerName="pull" Sep 30 17:12:53 crc kubenswrapper[4818]: E0930 17:12:53.261692 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerName="util" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261702 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerName="util" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261865 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8d214a7-d559-4d43-a6e7-3e81d6988d69" containerName="registry-server" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.261892 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b142dea-f8b9-4930-b066-64dd84db0dd5" containerName="extract" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.262567 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.267713 4818 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-ghxfb" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.268966 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.268992 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.277591 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4"] Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.377169 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb9w9\" (UniqueName: \"kubernetes.io/projected/b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de-kube-api-access-kb9w9\") pod \"cert-manager-operator-controller-manager-57cd46d6d-fnfr4\" (UID: \"b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.478768 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb9w9\" (UniqueName: \"kubernetes.io/projected/b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de-kube-api-access-kb9w9\") pod \"cert-manager-operator-controller-manager-57cd46d6d-fnfr4\" (UID: \"b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.500094 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb9w9\" (UniqueName: \"kubernetes.io/projected/b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de-kube-api-access-kb9w9\") pod \"cert-manager-operator-controller-manager-57cd46d6d-fnfr4\" (UID: \"b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" Sep 30 17:12:53 crc kubenswrapper[4818]: I0930 17:12:53.576959 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" Sep 30 17:12:54 crc kubenswrapper[4818]: I0930 17:12:54.059075 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4"] Sep 30 17:12:54 crc kubenswrapper[4818]: I0930 17:12:54.991266 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" event={"ID":"b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de","Type":"ContainerStarted","Data":"7076d318edc7cde7f61c5afa9b66c418c512b40acd21fea68cd93e1785021a0c"} Sep 30 17:12:57 crc kubenswrapper[4818]: I0930 17:12:57.014214 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:57 crc kubenswrapper[4818]: I0930 17:12:57.014534 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:57 crc kubenswrapper[4818]: I0930 17:12:57.063297 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:12:58 crc kubenswrapper[4818]: I0930 17:12:58.016324 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" event={"ID":"b737a6c7-ee93-4bdc-a478-1ef7dd4bc9de","Type":"ContainerStarted","Data":"8b2ddb0e99ad1eef93e01046441db8b7b6d1528a51947b8155d4cde2ef50182d"} Sep 30 17:12:58 crc kubenswrapper[4818]: I0930 17:12:58.050248 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-fnfr4" podStartSLOduration=1.606648463 podStartE2EDuration="5.050219617s" podCreationTimestamp="2025-09-30 17:12:53 +0000 UTC" firstStartedPulling="2025-09-30 17:12:54.072135216 +0000 UTC m=+820.826407032" lastFinishedPulling="2025-09-30 17:12:57.51570635 +0000 UTC m=+824.269978186" observedRunningTime="2025-09-30 17:12:58.036448825 +0000 UTC m=+824.790720671" watchObservedRunningTime="2025-09-30 17:12:58.050219617 +0000 UTC m=+824.804491473" Sep 30 17:12:58 crc kubenswrapper[4818]: I0930 17:12:58.105300 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.078759 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nzt4h"] Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.079651 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nzt4h" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="registry-server" containerID="cri-o://ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3" gracePeriod=2 Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.477339 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.583222 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-utilities\") pod \"33dec9db-5f6e-421d-a467-fa00eab01855\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.583378 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-catalog-content\") pod \"33dec9db-5f6e-421d-a467-fa00eab01855\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.583422 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdsn4\" (UniqueName: \"kubernetes.io/projected/33dec9db-5f6e-421d-a467-fa00eab01855-kube-api-access-kdsn4\") pod \"33dec9db-5f6e-421d-a467-fa00eab01855\" (UID: \"33dec9db-5f6e-421d-a467-fa00eab01855\") " Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.584202 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-utilities" (OuterVolumeSpecName: "utilities") pod "33dec9db-5f6e-421d-a467-fa00eab01855" (UID: "33dec9db-5f6e-421d-a467-fa00eab01855"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.596677 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "33dec9db-5f6e-421d-a467-fa00eab01855" (UID: "33dec9db-5f6e-421d-a467-fa00eab01855"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.606074 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33dec9db-5f6e-421d-a467-fa00eab01855-kube-api-access-kdsn4" (OuterVolumeSpecName: "kube-api-access-kdsn4") pod "33dec9db-5f6e-421d-a467-fa00eab01855" (UID: "33dec9db-5f6e-421d-a467-fa00eab01855"). InnerVolumeSpecName "kube-api-access-kdsn4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.685210 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.685237 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33dec9db-5f6e-421d-a467-fa00eab01855-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:13:00 crc kubenswrapper[4818]: I0930 17:13:00.685249 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdsn4\" (UniqueName: \"kubernetes.io/projected/33dec9db-5f6e-421d-a467-fa00eab01855-kube-api-access-kdsn4\") on node \"crc\" DevicePath \"\"" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.037300 4818 generic.go:334] "Generic (PLEG): container finished" podID="33dec9db-5f6e-421d-a467-fa00eab01855" containerID="ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3" exitCode=0 Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.037350 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nzt4h" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.037382 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nzt4h" event={"ID":"33dec9db-5f6e-421d-a467-fa00eab01855","Type":"ContainerDied","Data":"ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3"} Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.037438 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nzt4h" event={"ID":"33dec9db-5f6e-421d-a467-fa00eab01855","Type":"ContainerDied","Data":"a7519d34b7dfa3c8710b65d435d637fa42f3d1f8f516191141ac157d19cd3705"} Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.037476 4818 scope.go:117] "RemoveContainer" containerID="ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.074516 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nzt4h"] Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.081113 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nzt4h"] Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.083975 4818 scope.go:117] "RemoveContainer" containerID="74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.105118 4818 scope.go:117] "RemoveContainer" containerID="57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.132534 4818 scope.go:117] "RemoveContainer" containerID="ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3" Sep 30 17:13:01 crc kubenswrapper[4818]: E0930 17:13:01.133220 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3\": container with ID starting with ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3 not found: ID does not exist" containerID="ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.133259 4818 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3"} err="failed to get container status \"ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3\": rpc error: code = NotFound desc = could not find container \"ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3\": container with ID starting with ba84e37236e59bcf2b093e68b58ed59d86bf342fafd97f94428c2de76e3cfef3 not found: ID does not exist" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.133283 4818 scope.go:117] "RemoveContainer" containerID="74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7" Sep 30 17:13:01 crc kubenswrapper[4818]: E0930 17:13:01.133786 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7\": container with ID starting with 74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7 not found: ID does not exist" containerID="74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.133847 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7"} err="failed to get container status \"74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7\": rpc error: code = NotFound desc = could not find container \"74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7\": container with ID starting with 74bd09c88d3ae5d8f628ba5ebb534383b9196724d55f2d44942811f50329a0a7 not found: ID does not exist" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.133887 4818 scope.go:117] "RemoveContainer" containerID="57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091" Sep 30 17:13:01 crc kubenswrapper[4818]: E0930 17:13:01.134605 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091\": container with ID starting with 57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091 not found: ID does not exist" containerID="57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091" Sep 30 17:13:01 crc kubenswrapper[4818]: I0930 17:13:01.134647 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091"} err="failed to get container status \"57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091\": rpc error: code = NotFound desc = could not find container \"57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091\": container with ID starting with 57e061a8ad7613f83faec75fd551006c776ce7aa21a6921895e9e7704cf61091 not found: ID does not exist" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.029179 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" path="/var/lib/kubelet/pods/33dec9db-5f6e-421d-a467-fa00eab01855/volumes" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.119818 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-8l42t"] Sep 30 17:13:02 crc kubenswrapper[4818]: E0930 17:13:02.120185 4818 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="registry-server" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.120207 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="registry-server" Sep 30 17:13:02 crc kubenswrapper[4818]: E0930 17:13:02.120231 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="extract-utilities" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.120244 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="extract-utilities" Sep 30 17:13:02 crc kubenswrapper[4818]: E0930 17:13:02.120304 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="extract-content" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.120318 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="extract-content" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.120510 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="33dec9db-5f6e-421d-a467-fa00eab01855" containerName="registry-server" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.122045 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.124907 4818 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-2wx9n" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.125215 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.125540 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.129895 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-8l42t"] Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.211538 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ed641ab-239c-4e02-a872-800d04cb8655-bound-sa-token\") pod \"cert-manager-webhook-d969966f-8l42t\" (UID: \"0ed641ab-239c-4e02-a872-800d04cb8655\") " pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.211615 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcjr4\" (UniqueName: \"kubernetes.io/projected/0ed641ab-239c-4e02-a872-800d04cb8655-kube-api-access-qcjr4\") pod \"cert-manager-webhook-d969966f-8l42t\" (UID: \"0ed641ab-239c-4e02-a872-800d04cb8655\") " pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.313642 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ed641ab-239c-4e02-a872-800d04cb8655-bound-sa-token\") pod \"cert-manager-webhook-d969966f-8l42t\" (UID: \"0ed641ab-239c-4e02-a872-800d04cb8655\") " pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.314226 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-qcjr4\" (UniqueName: \"kubernetes.io/projected/0ed641ab-239c-4e02-a872-800d04cb8655-kube-api-access-qcjr4\") pod \"cert-manager-webhook-d969966f-8l42t\" (UID: \"0ed641ab-239c-4e02-a872-800d04cb8655\") " pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.334456 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ed641ab-239c-4e02-a872-800d04cb8655-bound-sa-token\") pod \"cert-manager-webhook-d969966f-8l42t\" (UID: \"0ed641ab-239c-4e02-a872-800d04cb8655\") " pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.337375 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcjr4\" (UniqueName: \"kubernetes.io/projected/0ed641ab-239c-4e02-a872-800d04cb8655-kube-api-access-qcjr4\") pod \"cert-manager-webhook-d969966f-8l42t\" (UID: \"0ed641ab-239c-4e02-a872-800d04cb8655\") " pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.440631 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:02 crc kubenswrapper[4818]: I0930 17:13:02.859337 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-8l42t"] Sep 30 17:13:03 crc kubenswrapper[4818]: I0930 17:13:03.056398 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-d969966f-8l42t" event={"ID":"0ed641ab-239c-4e02-a872-800d04cb8655","Type":"ContainerStarted","Data":"9dafa52a81815772c2adc140f0bd8956726e87d8ab88cd351110c317c8ba4f2e"} Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.227209 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7"] Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.228338 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.231279 4818 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-qdjs5" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.238385 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7"] Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.370799 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkcd5\" (UniqueName: \"kubernetes.io/projected/06937cd7-9130-454f-9a19-afa84e75da83-kube-api-access-dkcd5\") pod \"cert-manager-cainjector-7d9f95dbf-nhcf7\" (UID: \"06937cd7-9130-454f-9a19-afa84e75da83\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.371061 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06937cd7-9130-454f-9a19-afa84e75da83-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-nhcf7\" (UID: \"06937cd7-9130-454f-9a19-afa84e75da83\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.472758 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkcd5\" (UniqueName: \"kubernetes.io/projected/06937cd7-9130-454f-9a19-afa84e75da83-kube-api-access-dkcd5\") pod \"cert-manager-cainjector-7d9f95dbf-nhcf7\" (UID: \"06937cd7-9130-454f-9a19-afa84e75da83\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.472841 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06937cd7-9130-454f-9a19-afa84e75da83-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-nhcf7\" (UID: \"06937cd7-9130-454f-9a19-afa84e75da83\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.498015 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06937cd7-9130-454f-9a19-afa84e75da83-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-nhcf7\" (UID: \"06937cd7-9130-454f-9a19-afa84e75da83\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.501497 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkcd5\" (UniqueName: \"kubernetes.io/projected/06937cd7-9130-454f-9a19-afa84e75da83-kube-api-access-dkcd5\") pod \"cert-manager-cainjector-7d9f95dbf-nhcf7\" (UID: \"06937cd7-9130-454f-9a19-afa84e75da83\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:05 crc kubenswrapper[4818]: I0930 17:13:05.547538 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.089254 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5kvzx"] Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.090721 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.123851 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5kvzx"] Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.193562 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-748xt\" (UniqueName: \"kubernetes.io/projected/68b6e87a-f43c-4203-ae40-95ca8dbbe372-kube-api-access-748xt\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.193700 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-utilities\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.193727 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-catalog-content\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.294511 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-utilities\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.294566 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-catalog-content\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.294611 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-748xt\" (UniqueName: \"kubernetes.io/projected/68b6e87a-f43c-4203-ae40-95ca8dbbe372-kube-api-access-748xt\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.295301 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-utilities\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.295515 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-catalog-content\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.321276 4818 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-748xt\" (UniqueName: \"kubernetes.io/projected/68b6e87a-f43c-4203-ae40-95ca8dbbe372-kube-api-access-748xt\") pod \"certified-operators-5kvzx\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:06 crc kubenswrapper[4818]: I0930 17:13:06.428132 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:07 crc kubenswrapper[4818]: I0930 17:13:07.702709 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7"] Sep 30 17:13:07 crc kubenswrapper[4818]: W0930 17:13:07.705151 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06937cd7_9130_454f_9a19_afa84e75da83.slice/crio-5c448764fa3b02e0fcc2ff86f21636b74cad92f17438a269dcd4b13665b638a5 WatchSource:0}: Error finding container 5c448764fa3b02e0fcc2ff86f21636b74cad92f17438a269dcd4b13665b638a5: Status 404 returned error can't find the container with id 5c448764fa3b02e0fcc2ff86f21636b74cad92f17438a269dcd4b13665b638a5 Sep 30 17:13:07 crc kubenswrapper[4818]: I0930 17:13:07.800035 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5kvzx"] Sep 30 17:13:07 crc kubenswrapper[4818]: W0930 17:13:07.811213 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68b6e87a_f43c_4203_ae40_95ca8dbbe372.slice/crio-170e2593659778fa153f58af01d69a1285ad8ed58a517f689d3ee121bfe5d38d WatchSource:0}: Error finding container 170e2593659778fa153f58af01d69a1285ad8ed58a517f689d3ee121bfe5d38d: Status 404 returned error can't find the container with id 170e2593659778fa153f58af01d69a1285ad8ed58a517f689d3ee121bfe5d38d Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.112812 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-d969966f-8l42t" event={"ID":"0ed641ab-239c-4e02-a872-800d04cb8655","Type":"ContainerStarted","Data":"8f7b89b7a4618a3592379ddceaf7a039cb101d3c17bcdb2cace503ffecd8f0dd"} Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.112945 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.116833 4818 generic.go:334] "Generic (PLEG): container finished" podID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerID="e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8" exitCode=0 Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.116936 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kvzx" event={"ID":"68b6e87a-f43c-4203-ae40-95ca8dbbe372","Type":"ContainerDied","Data":"e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8"} Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.116961 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kvzx" event={"ID":"68b6e87a-f43c-4203-ae40-95ca8dbbe372","Type":"ContainerStarted","Data":"170e2593659778fa153f58af01d69a1285ad8ed58a517f689d3ee121bfe5d38d"} Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.118852 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" 
event={"ID":"06937cd7-9130-454f-9a19-afa84e75da83","Type":"ContainerStarted","Data":"75bc3e5ffc4a93c7fc0676acaff3d670a10d72ea62756e8916aa66f06b9bf53e"} Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.118945 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" event={"ID":"06937cd7-9130-454f-9a19-afa84e75da83","Type":"ContainerStarted","Data":"5c448764fa3b02e0fcc2ff86f21636b74cad92f17438a269dcd4b13665b638a5"} Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.133180 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-d969966f-8l42t" podStartSLOduration=1.522989444 podStartE2EDuration="6.133155545s" podCreationTimestamp="2025-09-30 17:13:02 +0000 UTC" firstStartedPulling="2025-09-30 17:13:02.871243095 +0000 UTC m=+829.625514911" lastFinishedPulling="2025-09-30 17:13:07.481409206 +0000 UTC m=+834.235681012" observedRunningTime="2025-09-30 17:13:08.129537108 +0000 UTC m=+834.883808964" watchObservedRunningTime="2025-09-30 17:13:08.133155545 +0000 UTC m=+834.887427391" Sep 30 17:13:08 crc kubenswrapper[4818]: I0930 17:13:08.148412 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-nhcf7" podStartSLOduration=3.148395527 podStartE2EDuration="3.148395527s" podCreationTimestamp="2025-09-30 17:13:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:13:08.146354991 +0000 UTC m=+834.900626847" watchObservedRunningTime="2025-09-30 17:13:08.148395527 +0000 UTC m=+834.902667343" Sep 30 17:13:09 crc kubenswrapper[4818]: I0930 17:13:09.129262 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kvzx" event={"ID":"68b6e87a-f43c-4203-ae40-95ca8dbbe372","Type":"ContainerStarted","Data":"8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db"} Sep 30 17:13:10 crc kubenswrapper[4818]: I0930 17:13:10.139459 4818 generic.go:334] "Generic (PLEG): container finished" podID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerID="8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db" exitCode=0 Sep 30 17:13:10 crc kubenswrapper[4818]: I0930 17:13:10.139534 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kvzx" event={"ID":"68b6e87a-f43c-4203-ae40-95ca8dbbe372","Type":"ContainerDied","Data":"8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db"} Sep 30 17:13:11 crc kubenswrapper[4818]: I0930 17:13:11.148033 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kvzx" event={"ID":"68b6e87a-f43c-4203-ae40-95ca8dbbe372","Type":"ContainerStarted","Data":"d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4"} Sep 30 17:13:11 crc kubenswrapper[4818]: I0930 17:13:11.162641 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5kvzx" podStartSLOduration=2.724018562 podStartE2EDuration="5.162622149s" podCreationTimestamp="2025-09-30 17:13:06 +0000 UTC" firstStartedPulling="2025-09-30 17:13:08.11999702 +0000 UTC m=+834.874268836" lastFinishedPulling="2025-09-30 17:13:10.558600597 +0000 UTC m=+837.312872423" observedRunningTime="2025-09-30 17:13:11.161972662 +0000 UTC m=+837.916244518" watchObservedRunningTime="2025-09-30 17:13:11.162622149 +0000 UTC m=+837.916893965" Sep 
30 17:13:12 crc kubenswrapper[4818]: I0930 17:13:12.448146 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-d969966f-8l42t" Sep 30 17:13:16 crc kubenswrapper[4818]: I0930 17:13:16.428542 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:16 crc kubenswrapper[4818]: I0930 17:13:16.429264 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:16 crc kubenswrapper[4818]: I0930 17:13:16.498355 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:17 crc kubenswrapper[4818]: I0930 17:13:17.298806 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:17 crc kubenswrapper[4818]: I0930 17:13:17.739003 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5kvzx"] Sep 30 17:13:19 crc kubenswrapper[4818]: I0930 17:13:19.218888 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5kvzx" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="registry-server" containerID="cri-o://d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4" gracePeriod=2 Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.197735 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.225028 4818 generic.go:334] "Generic (PLEG): container finished" podID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerID="d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4" exitCode=0 Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.225087 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kvzx" event={"ID":"68b6e87a-f43c-4203-ae40-95ca8dbbe372","Type":"ContainerDied","Data":"d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4"} Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.225126 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kvzx" event={"ID":"68b6e87a-f43c-4203-ae40-95ca8dbbe372","Type":"ContainerDied","Data":"170e2593659778fa153f58af01d69a1285ad8ed58a517f689d3ee121bfe5d38d"} Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.225152 4818 scope.go:117] "RemoveContainer" containerID="d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.225161 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5kvzx" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.244101 4818 scope.go:117] "RemoveContainer" containerID="8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.265559 4818 scope.go:117] "RemoveContainer" containerID="e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.282041 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-utilities\") pod \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.282134 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-748xt\" (UniqueName: \"kubernetes.io/projected/68b6e87a-f43c-4203-ae40-95ca8dbbe372-kube-api-access-748xt\") pod \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.282266 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-catalog-content\") pod \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\" (UID: \"68b6e87a-f43c-4203-ae40-95ca8dbbe372\") " Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.283542 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-utilities" (OuterVolumeSpecName: "utilities") pod "68b6e87a-f43c-4203-ae40-95ca8dbbe372" (UID: "68b6e87a-f43c-4203-ae40-95ca8dbbe372"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.287648 4818 scope.go:117] "RemoveContainer" containerID="d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.290322 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68b6e87a-f43c-4203-ae40-95ca8dbbe372-kube-api-access-748xt" (OuterVolumeSpecName: "kube-api-access-748xt") pod "68b6e87a-f43c-4203-ae40-95ca8dbbe372" (UID: "68b6e87a-f43c-4203-ae40-95ca8dbbe372"). InnerVolumeSpecName "kube-api-access-748xt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:13:20 crc kubenswrapper[4818]: E0930 17:13:20.293303 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4\": container with ID starting with d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4 not found: ID does not exist" containerID="d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.293330 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4"} err="failed to get container status \"d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4\": rpc error: code = NotFound desc = could not find container \"d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4\": container with ID starting with d12e71a9eb881c06d0074c94943ee4293fea66e50a00c8dee98df74d8c2873d4 not found: ID does not exist" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.293354 4818 scope.go:117] "RemoveContainer" containerID="8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db" Sep 30 17:13:20 crc kubenswrapper[4818]: E0930 17:13:20.293942 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db\": container with ID starting with 8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db not found: ID does not exist" containerID="8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.293990 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db"} err="failed to get container status \"8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db\": rpc error: code = NotFound desc = could not find container \"8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db\": container with ID starting with 8d29b652a6e721e13ec2ca759c7daf49258c43a37a08f36a16cf584d76ffa1db not found: ID does not exist" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.294020 4818 scope.go:117] "RemoveContainer" containerID="e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8" Sep 30 17:13:20 crc kubenswrapper[4818]: E0930 17:13:20.294361 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8\": container with ID starting with e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8 not found: ID does not exist" containerID="e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.294444 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8"} err="failed to get container status \"e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8\": rpc error: code = NotFound desc = could not find container \"e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8\": container with ID starting with 
e9b8ccf08d8db14a5da303dc28b39b8535569fdd1fdb06d2b47aec3455ec57f8 not found: ID does not exist" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.325900 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68b6e87a-f43c-4203-ae40-95ca8dbbe372" (UID: "68b6e87a-f43c-4203-ae40-95ca8dbbe372"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.384507 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.384586 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68b6e87a-f43c-4203-ae40-95ca8dbbe372-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.384619 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-748xt\" (UniqueName: \"kubernetes.io/projected/68b6e87a-f43c-4203-ae40-95ca8dbbe372-kube-api-access-748xt\") on node \"crc\" DevicePath \"\"" Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.580019 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5kvzx"] Sep 30 17:13:20 crc kubenswrapper[4818]: I0930 17:13:20.587242 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5kvzx"] Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.036354 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" path="/var/lib/kubelet/pods/68b6e87a-f43c-4203-ae40-95ca8dbbe372/volumes" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.320627 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-lmzph"] Sep 30 17:13:22 crc kubenswrapper[4818]: E0930 17:13:22.322077 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="extract-content" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.322363 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="extract-content" Sep 30 17:13:22 crc kubenswrapper[4818]: E0930 17:13:22.322478 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="extract-utilities" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.322579 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="extract-utilities" Sep 30 17:13:22 crc kubenswrapper[4818]: E0930 17:13:22.322737 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="registry-server" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.322830 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="registry-server" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.323162 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="68b6e87a-f43c-4203-ae40-95ca8dbbe372" containerName="registry-server" Sep 30 17:13:22 crc kubenswrapper[4818]: 
I0930 17:13:22.324235 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.327009 4818 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-hczlt" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.329632 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-lmzph"] Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.414765 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt2g7\" (UniqueName: \"kubernetes.io/projected/75887178-d0ff-43b3-9fd3-e8674c7a5082-kube-api-access-gt2g7\") pod \"cert-manager-7d4cc89fcb-lmzph\" (UID: \"75887178-d0ff-43b3-9fd3-e8674c7a5082\") " pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.415107 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/75887178-d0ff-43b3-9fd3-e8674c7a5082-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-lmzph\" (UID: \"75887178-d0ff-43b3-9fd3-e8674c7a5082\") " pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.516729 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt2g7\" (UniqueName: \"kubernetes.io/projected/75887178-d0ff-43b3-9fd3-e8674c7a5082-kube-api-access-gt2g7\") pod \"cert-manager-7d4cc89fcb-lmzph\" (UID: \"75887178-d0ff-43b3-9fd3-e8674c7a5082\") " pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.516821 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/75887178-d0ff-43b3-9fd3-e8674c7a5082-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-lmzph\" (UID: \"75887178-d0ff-43b3-9fd3-e8674c7a5082\") " pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.550802 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt2g7\" (UniqueName: \"kubernetes.io/projected/75887178-d0ff-43b3-9fd3-e8674c7a5082-kube-api-access-gt2g7\") pod \"cert-manager-7d4cc89fcb-lmzph\" (UID: \"75887178-d0ff-43b3-9fd3-e8674c7a5082\") " pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.551209 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/75887178-d0ff-43b3-9fd3-e8674c7a5082-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-lmzph\" (UID: \"75887178-d0ff-43b3-9fd3-e8674c7a5082\") " pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:22 crc kubenswrapper[4818]: I0930 17:13:22.645068 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" Sep 30 17:13:23 crc kubenswrapper[4818]: I0930 17:13:23.079425 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-lmzph"] Sep 30 17:13:23 crc kubenswrapper[4818]: W0930 17:13:23.081856 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75887178_d0ff_43b3_9fd3_e8674c7a5082.slice/crio-fd5fb819aef860d1b914ed5f3b5d82ac8a7fe78226f4c9847aa26bbb12dfd343 WatchSource:0}: Error finding container fd5fb819aef860d1b914ed5f3b5d82ac8a7fe78226f4c9847aa26bbb12dfd343: Status 404 returned error can't find the container with id fd5fb819aef860d1b914ed5f3b5d82ac8a7fe78226f4c9847aa26bbb12dfd343 Sep 30 17:13:23 crc kubenswrapper[4818]: I0930 17:13:23.251501 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" event={"ID":"75887178-d0ff-43b3-9fd3-e8674c7a5082","Type":"ContainerStarted","Data":"fd5fb819aef860d1b914ed5f3b5d82ac8a7fe78226f4c9847aa26bbb12dfd343"} Sep 30 17:13:24 crc kubenswrapper[4818]: I0930 17:13:24.262764 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" event={"ID":"75887178-d0ff-43b3-9fd3-e8674c7a5082","Type":"ContainerStarted","Data":"ea357e3131d54a8430d49fa1dc45435c483f3f63865415d3d864f39b2ae844e3"} Sep 30 17:13:24 crc kubenswrapper[4818]: I0930 17:13:24.291094 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-7d4cc89fcb-lmzph" podStartSLOduration=2.291068937 podStartE2EDuration="2.291068937s" podCreationTimestamp="2025-09-30 17:13:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:13:24.288412956 +0000 UTC m=+851.042684802" watchObservedRunningTime="2025-09-30 17:13:24.291068937 +0000 UTC m=+851.045340763" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.463159 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-9vqlr"] Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.464693 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-9vqlr" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.468016 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.471399 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-8hbwk" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.471601 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.490445 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9vqlr"] Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.593253 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vw7c\" (UniqueName: \"kubernetes.io/projected/5a079e38-9dfa-4567-a8d0-27d500b30ac5-kube-api-access-4vw7c\") pod \"openstack-operator-index-9vqlr\" (UID: \"5a079e38-9dfa-4567-a8d0-27d500b30ac5\") " pod="openstack-operators/openstack-operator-index-9vqlr" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.695155 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vw7c\" (UniqueName: \"kubernetes.io/projected/5a079e38-9dfa-4567-a8d0-27d500b30ac5-kube-api-access-4vw7c\") pod \"openstack-operator-index-9vqlr\" (UID: \"5a079e38-9dfa-4567-a8d0-27d500b30ac5\") " pod="openstack-operators/openstack-operator-index-9vqlr" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.725045 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vw7c\" (UniqueName: \"kubernetes.io/projected/5a079e38-9dfa-4567-a8d0-27d500b30ac5-kube-api-access-4vw7c\") pod \"openstack-operator-index-9vqlr\" (UID: \"5a079e38-9dfa-4567-a8d0-27d500b30ac5\") " pod="openstack-operators/openstack-operator-index-9vqlr" Sep 30 17:13:27 crc kubenswrapper[4818]: I0930 17:13:27.791907 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-9vqlr" Sep 30 17:13:28 crc kubenswrapper[4818]: I0930 17:13:28.291036 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9vqlr"] Sep 30 17:13:29 crc kubenswrapper[4818]: I0930 17:13:29.303692 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9vqlr" event={"ID":"5a079e38-9dfa-4567-a8d0-27d500b30ac5","Type":"ContainerStarted","Data":"d666785f26408fa3da322c77df7717ec6cfb67a08826ea967e68576489b40644"} Sep 30 17:13:30 crc kubenswrapper[4818]: I0930 17:13:30.830552 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-9vqlr"] Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.328187 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9vqlr" event={"ID":"5a079e38-9dfa-4567-a8d0-27d500b30ac5","Type":"ContainerStarted","Data":"18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a"} Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.329110 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-9vqlr" podUID="5a079e38-9dfa-4567-a8d0-27d500b30ac5" containerName="registry-server" containerID="cri-o://18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a" gracePeriod=2 Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.362195 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-9vqlr" podStartSLOduration=1.6933254739999999 podStartE2EDuration="4.362164561s" podCreationTimestamp="2025-09-30 17:13:27 +0000 UTC" firstStartedPulling="2025-09-30 17:13:28.302344429 +0000 UTC m=+855.056616275" lastFinishedPulling="2025-09-30 17:13:30.971183496 +0000 UTC m=+857.725455362" observedRunningTime="2025-09-30 17:13:31.352553392 +0000 UTC m=+858.106825288" watchObservedRunningTime="2025-09-30 17:13:31.362164561 +0000 UTC m=+858.116436407" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.448457 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-cwj8d"] Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.450444 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-cwj8d" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.462456 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-cwj8d"] Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.556652 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87h47\" (UniqueName: \"kubernetes.io/projected/99558b8b-4cbe-4868-aca6-90e66d06160f-kube-api-access-87h47\") pod \"openstack-operator-index-cwj8d\" (UID: \"99558b8b-4cbe-4868-aca6-90e66d06160f\") " pod="openstack-operators/openstack-operator-index-cwj8d" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.657971 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87h47\" (UniqueName: \"kubernetes.io/projected/99558b8b-4cbe-4868-aca6-90e66d06160f-kube-api-access-87h47\") pod \"openstack-operator-index-cwj8d\" (UID: \"99558b8b-4cbe-4868-aca6-90e66d06160f\") " pod="openstack-operators/openstack-operator-index-cwj8d" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.684402 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87h47\" (UniqueName: \"kubernetes.io/projected/99558b8b-4cbe-4868-aca6-90e66d06160f-kube-api-access-87h47\") pod \"openstack-operator-index-cwj8d\" (UID: \"99558b8b-4cbe-4868-aca6-90e66d06160f\") " pod="openstack-operators/openstack-operator-index-cwj8d" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.770531 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9vqlr" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.801358 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-cwj8d" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.861795 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vw7c\" (UniqueName: \"kubernetes.io/projected/5a079e38-9dfa-4567-a8d0-27d500b30ac5-kube-api-access-4vw7c\") pod \"5a079e38-9dfa-4567-a8d0-27d500b30ac5\" (UID: \"5a079e38-9dfa-4567-a8d0-27d500b30ac5\") " Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.865510 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a079e38-9dfa-4567-a8d0-27d500b30ac5-kube-api-access-4vw7c" (OuterVolumeSpecName: "kube-api-access-4vw7c") pod "5a079e38-9dfa-4567-a8d0-27d500b30ac5" (UID: "5a079e38-9dfa-4567-a8d0-27d500b30ac5"). InnerVolumeSpecName "kube-api-access-4vw7c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:13:31 crc kubenswrapper[4818]: I0930 17:13:31.963492 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vw7c\" (UniqueName: \"kubernetes.io/projected/5a079e38-9dfa-4567-a8d0-27d500b30ac5-kube-api-access-4vw7c\") on node \"crc\" DevicePath \"\"" Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.204432 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-cwj8d"] Sep 30 17:13:32 crc kubenswrapper[4818]: W0930 17:13:32.221120 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99558b8b_4cbe_4868_aca6_90e66d06160f.slice/crio-c9b0794acf7348737c21266f74456140f3a717315d9e1404dde2340951ed87e2 WatchSource:0}: Error finding container c9b0794acf7348737c21266f74456140f3a717315d9e1404dde2340951ed87e2: Status 404 returned error can't find the container with id c9b0794acf7348737c21266f74456140f3a717315d9e1404dde2340951ed87e2 Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.338624 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-cwj8d" event={"ID":"99558b8b-4cbe-4868-aca6-90e66d06160f","Type":"ContainerStarted","Data":"c9b0794acf7348737c21266f74456140f3a717315d9e1404dde2340951ed87e2"} Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.340860 4818 generic.go:334] "Generic (PLEG): container finished" podID="5a079e38-9dfa-4567-a8d0-27d500b30ac5" containerID="18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a" exitCode=0 Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.340904 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9vqlr" event={"ID":"5a079e38-9dfa-4567-a8d0-27d500b30ac5","Type":"ContainerDied","Data":"18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a"} Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.341015 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9vqlr" event={"ID":"5a079e38-9dfa-4567-a8d0-27d500b30ac5","Type":"ContainerDied","Data":"d666785f26408fa3da322c77df7717ec6cfb67a08826ea967e68576489b40644"} Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.341048 4818 scope.go:117] "RemoveContainer" containerID="18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a" Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.341760 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-9vqlr"
Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.365265 4818 scope.go:117] "RemoveContainer" containerID="18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a"
Sep 30 17:13:32 crc kubenswrapper[4818]: E0930 17:13:32.366137 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a\": container with ID starting with 18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a not found: ID does not exist" containerID="18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a"
Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.366186 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a"} err="failed to get container status \"18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a\": rpc error: code = NotFound desc = could not find container \"18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a\": container with ID starting with 18a829a87c87cf27c84e771945b3ad3773a8cebcdaeaf02fd16e4b1f8699782a not found: ID does not exist"
Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.390857 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-9vqlr"]
Sep 30 17:13:32 crc kubenswrapper[4818]: I0930 17:13:32.398438 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-9vqlr"]
Sep 30 17:13:33 crc kubenswrapper[4818]: I0930 17:13:33.355026 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-cwj8d" event={"ID":"99558b8b-4cbe-4868-aca6-90e66d06160f","Type":"ContainerStarted","Data":"6fa15a7533ea3e8315e3eafeb84f1b8966b98424356f3f5dce00c30209cf5239"}
Sep 30 17:13:33 crc kubenswrapper[4818]: I0930 17:13:33.380801 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-cwj8d" podStartSLOduration=2.330911285 podStartE2EDuration="2.380782017s" podCreationTimestamp="2025-09-30 17:13:31 +0000 UTC" firstStartedPulling="2025-09-30 17:13:32.225185081 +0000 UTC m=+858.979456917" lastFinishedPulling="2025-09-30 17:13:32.275055823 +0000 UTC m=+859.029327649" observedRunningTime="2025-09-30 17:13:33.371760554 +0000 UTC m=+860.126032410" watchObservedRunningTime="2025-09-30 17:13:33.380782017 +0000 UTC m=+860.135053843"
Sep 30 17:13:34 crc kubenswrapper[4818]: I0930 17:13:34.029727 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a079e38-9dfa-4567-a8d0-27d500b30ac5" path="/var/lib/kubelet/pods/5a079e38-9dfa-4567-a8d0-27d500b30ac5/volumes"
Sep 30 17:13:41 crc kubenswrapper[4818]: I0930 17:13:41.802255 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-cwj8d"
Sep 30 17:13:41 crc kubenswrapper[4818]: I0930 17:13:41.802889 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-cwj8d"
Sep 30 17:13:41 crc kubenswrapper[4818]: I0930 17:13:41.848088 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-cwj8d"
Sep 30 17:13:42 crc kubenswrapper[4818]: I0930 17:13:42.474832 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-cwj8d"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.095942 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"]
Sep 30 17:13:48 crc kubenswrapper[4818]: E0930 17:13:48.096467 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a079e38-9dfa-4567-a8d0-27d500b30ac5" containerName="registry-server"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.096481 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a079e38-9dfa-4567-a8d0-27d500b30ac5" containerName="registry-server"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.096648 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a079e38-9dfa-4567-a8d0-27d500b30ac5" containerName="registry-server"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.097709 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.099931 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-fsmvv"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.106246 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"]
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.193308 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqfhs\" (UniqueName: \"kubernetes.io/projected/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-kube-api-access-cqfhs\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.193473 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-util\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.193520 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-bundle\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.295901 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-bundle\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.296005 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqfhs\" (UniqueName: \"kubernetes.io/projected/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-kube-api-access-cqfhs\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.296073 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-util\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.296494 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-bundle\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.296507 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-util\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.339288 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqfhs\" (UniqueName: \"kubernetes.io/projected/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-kube-api-access-cqfhs\") pod \"b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") " pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.424524 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:48 crc kubenswrapper[4818]: I0930 17:13:48.841697 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"]
Sep 30 17:13:48 crc kubenswrapper[4818]: W0930 17:13:48.857779 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5c9df34_4128_4412_9f8f_cba05d9e7dd1.slice/crio-2992305d5f4e20cd22df1a8c0e85a4494e7a5e30699b0b8aebe1819ae765f853 WatchSource:0}: Error finding container 2992305d5f4e20cd22df1a8c0e85a4494e7a5e30699b0b8aebe1819ae765f853: Status 404 returned error can't find the container with id 2992305d5f4e20cd22df1a8c0e85a4494e7a5e30699b0b8aebe1819ae765f853
Sep 30 17:13:49 crc kubenswrapper[4818]: I0930 17:13:49.493465 4818 generic.go:334] "Generic (PLEG): container finished" podID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerID="5e31aa73442755d620748212375c08a367bcb636725c75bbf72fa3030022118a" exitCode=0
Sep 30 17:13:49 crc kubenswrapper[4818]: I0930 17:13:49.493535 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s" event={"ID":"f5c9df34-4128-4412-9f8f-cba05d9e7dd1","Type":"ContainerDied","Data":"5e31aa73442755d620748212375c08a367bcb636725c75bbf72fa3030022118a"}
Sep 30 17:13:49 crc kubenswrapper[4818]: I0930 17:13:49.493573 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s" event={"ID":"f5c9df34-4128-4412-9f8f-cba05d9e7dd1","Type":"ContainerStarted","Data":"2992305d5f4e20cd22df1a8c0e85a4494e7a5e30699b0b8aebe1819ae765f853"}
Sep 30 17:13:50 crc kubenswrapper[4818]: I0930 17:13:50.512158 4818 generic.go:334] "Generic (PLEG): container finished" podID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerID="a07916cf5d988aac152df9ac9cb379ab37741d63720ada4673e0c412c7c4e92b" exitCode=0
Sep 30 17:13:50 crc kubenswrapper[4818]: I0930 17:13:50.512299 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s" event={"ID":"f5c9df34-4128-4412-9f8f-cba05d9e7dd1","Type":"ContainerDied","Data":"a07916cf5d988aac152df9ac9cb379ab37741d63720ada4673e0c412c7c4e92b"}
Sep 30 17:13:51 crc kubenswrapper[4818]: I0930 17:13:51.519308 4818 generic.go:334] "Generic (PLEG): container finished" podID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerID="58f6f2bb07083f766a5e8e015a80f1eb246806e682f6865c5a8f8423b48c868b" exitCode=0
Sep 30 17:13:51 crc kubenswrapper[4818]: I0930 17:13:51.519677 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s" event={"ID":"f5c9df34-4128-4412-9f8f-cba05d9e7dd1","Type":"ContainerDied","Data":"58f6f2bb07083f766a5e8e015a80f1eb246806e682f6865c5a8f8423b48c868b"}
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.596234 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.596321 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.890801 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.966593 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-util\") pod \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") "
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.966643 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-bundle\") pod \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") "
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.966790 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqfhs\" (UniqueName: \"kubernetes.io/projected/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-kube-api-access-cqfhs\") pod \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\" (UID: \"f5c9df34-4128-4412-9f8f-cba05d9e7dd1\") "
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.967313 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-bundle" (OuterVolumeSpecName: "bundle") pod "f5c9df34-4128-4412-9f8f-cba05d9e7dd1" (UID: "f5c9df34-4128-4412-9f8f-cba05d9e7dd1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.975119 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-kube-api-access-cqfhs" (OuterVolumeSpecName: "kube-api-access-cqfhs") pod "f5c9df34-4128-4412-9f8f-cba05d9e7dd1" (UID: "f5c9df34-4128-4412-9f8f-cba05d9e7dd1"). InnerVolumeSpecName "kube-api-access-cqfhs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:13:52 crc kubenswrapper[4818]: I0930 17:13:52.980852 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-util" (OuterVolumeSpecName: "util") pod "f5c9df34-4128-4412-9f8f-cba05d9e7dd1" (UID: "f5c9df34-4128-4412-9f8f-cba05d9e7dd1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:13:53 crc kubenswrapper[4818]: I0930 17:13:53.068960 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqfhs\" (UniqueName: \"kubernetes.io/projected/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-kube-api-access-cqfhs\") on node \"crc\" DevicePath \"\""
Sep 30 17:13:53 crc kubenswrapper[4818]: I0930 17:13:53.068998 4818 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-util\") on node \"crc\" DevicePath \"\""
Sep 30 17:13:53 crc kubenswrapper[4818]: I0930 17:13:53.069009 4818 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f5c9df34-4128-4412-9f8f-cba05d9e7dd1-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:13:53 crc kubenswrapper[4818]: I0930 17:13:53.535886 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s" event={"ID":"f5c9df34-4128-4412-9f8f-cba05d9e7dd1","Type":"ContainerDied","Data":"2992305d5f4e20cd22df1a8c0e85a4494e7a5e30699b0b8aebe1819ae765f853"}
Sep 30 17:13:53 crc kubenswrapper[4818]: I0930 17:13:53.535963 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2992305d5f4e20cd22df1a8c0e85a4494e7a5e30699b0b8aebe1819ae765f853"
Sep 30 17:13:53 crc kubenswrapper[4818]: I0930 17:13:53.535970 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.802548 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"]
Sep 30 17:14:01 crc kubenswrapper[4818]: E0930 17:14:01.803658 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerName="pull"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.803680 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerName="pull"
Sep 30 17:14:01 crc kubenswrapper[4818]: E0930 17:14:01.803705 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerName="extract"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.803716 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerName="extract"
Sep 30 17:14:01 crc kubenswrapper[4818]: E0930 17:14:01.803732 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerName="util"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.803743 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerName="util"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.804043 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5c9df34-4128-4412-9f8f-cba05d9e7dd1" containerName="extract"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.805207 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.819623 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-4z5l4"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.834684 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"]
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.896812 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw6pj\" (UniqueName: \"kubernetes.io/projected/2c483975-9057-48f9-a5fb-54e905171d02-kube-api-access-tw6pj\") pod \"openstack-operator-controller-operator-dd685c6cd-b2nqh\" (UID: \"2c483975-9057-48f9-a5fb-54e905171d02\") " pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"
Sep 30 17:14:01 crc kubenswrapper[4818]: I0930 17:14:01.997776 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw6pj\" (UniqueName: \"kubernetes.io/projected/2c483975-9057-48f9-a5fb-54e905171d02-kube-api-access-tw6pj\") pod \"openstack-operator-controller-operator-dd685c6cd-b2nqh\" (UID: \"2c483975-9057-48f9-a5fb-54e905171d02\") " pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"
Sep 30 17:14:02 crc kubenswrapper[4818]: I0930 17:14:02.026023 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw6pj\" (UniqueName: \"kubernetes.io/projected/2c483975-9057-48f9-a5fb-54e905171d02-kube-api-access-tw6pj\") pod \"openstack-operator-controller-operator-dd685c6cd-b2nqh\" (UID: \"2c483975-9057-48f9-a5fb-54e905171d02\") " pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"
Sep 30 17:14:02 crc kubenswrapper[4818]: I0930 17:14:02.176989 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"
Sep 30 17:14:02 crc kubenswrapper[4818]: I0930 17:14:02.600308 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"]
Sep 30 17:14:03 crc kubenswrapper[4818]: I0930 17:14:03.602288 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" event={"ID":"2c483975-9057-48f9-a5fb-54e905171d02","Type":"ContainerStarted","Data":"b5d5a027c5bfde32200fc1c634a9c8f4a75a5e7701e7a9d571d0d7630784973d"}
Sep 30 17:14:06 crc kubenswrapper[4818]: I0930 17:14:06.622403 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" event={"ID":"2c483975-9057-48f9-a5fb-54e905171d02","Type":"ContainerStarted","Data":"970bd29c0824fecfae74119ad9caa7a608bc3e708308267cf57d3b937407f30f"}
Sep 30 17:14:08 crc kubenswrapper[4818]: I0930 17:14:08.641957 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" event={"ID":"2c483975-9057-48f9-a5fb-54e905171d02","Type":"ContainerStarted","Data":"3dd92a505ca99f530071d674256910242a81181ea0ac9ff04118e983d6d15fa1"}
Sep 30 17:14:08 crc kubenswrapper[4818]: I0930 17:14:08.642354 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"
Sep 30 17:14:08 crc kubenswrapper[4818]: I0930 17:14:08.669650 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" podStartSLOduration=1.876129875 podStartE2EDuration="7.66963106s" podCreationTimestamp="2025-09-30 17:14:01 +0000 UTC" firstStartedPulling="2025-09-30 17:14:02.611236805 +0000 UTC m=+889.365508621" lastFinishedPulling="2025-09-30 17:14:08.40473799 +0000 UTC m=+895.159009806" observedRunningTime="2025-09-30 17:14:08.665802967 +0000 UTC m=+895.420074813" watchObservedRunningTime="2025-09-30 17:14:08.66963106 +0000 UTC m=+895.423902876"
Sep 30 17:14:12 crc kubenswrapper[4818]: I0930 17:14:12.180001 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"
Sep 30 17:14:22 crc kubenswrapper[4818]: I0930 17:14:22.596111 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:14:22 crc kubenswrapper[4818]: I0930 17:14:22.596788 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.232001 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.233627 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.239355 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-68jb4"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.241836 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.243297 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.246384 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.248490 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-nrvsx"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.261148 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.290973 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.291981 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.297002 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-s6l6w"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.311097 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.313617 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfw8c\" (UniqueName: \"kubernetes.io/projected/38a968a2-8f4f-4389-8ee1-852f92ffcb4b-kube-api-access-nfw8c\") pod \"barbican-operator-controller-manager-6ff8b75857-q4g6w\" (UID: \"38a968a2-8f4f-4389-8ee1-852f92ffcb4b\") " pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.327073 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.328085 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.333062 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-9sjcf"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.367011 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.392015 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.393904 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.400253 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-jzspb"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.402995 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.417695 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4tdq\" (UniqueName: \"kubernetes.io/projected/7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b-kube-api-access-j4tdq\") pod \"designate-operator-controller-manager-84f4f7b77b-tkdbz\" (UID: \"7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b\") " pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.417787 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rw5h\" (UniqueName: \"kubernetes.io/projected/ef92660c-59b4-4bf2-ae84-1873db0c94b2-kube-api-access-5rw5h\") pod \"glance-operator-controller-manager-84958c4d49-r6vlr\" (UID: \"ef92660c-59b4-4bf2-ae84-1873db0c94b2\") " pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.417806 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lx8m\" (UniqueName: \"kubernetes.io/projected/04b289df-a81e-43b7-8aa1-66c50deeccf6-kube-api-access-8lx8m\") pod \"cinder-operator-controller-manager-644bddb6d8-45xpz\" (UID: \"04b289df-a81e-43b7-8aa1-66c50deeccf6\") " pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.417836 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfw8c\" (UniqueName: \"kubernetes.io/projected/38a968a2-8f4f-4389-8ee1-852f92ffcb4b-kube-api-access-nfw8c\") pod \"barbican-operator-controller-manager-6ff8b75857-q4g6w\" (UID: \"38a968a2-8f4f-4389-8ee1-852f92ffcb4b\") " pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.418272 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.432440 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-hq7vp"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.449989 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.460189 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.468250 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.469531 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.472841 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wbhq6"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.505330 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfw8c\" (UniqueName: \"kubernetes.io/projected/38a968a2-8f4f-4389-8ee1-852f92ffcb4b-kube-api-access-nfw8c\") pod \"barbican-operator-controller-manager-6ff8b75857-q4g6w\" (UID: \"38a968a2-8f4f-4389-8ee1-852f92ffcb4b\") " pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.505603 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.507317 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.516028 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.517075 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.518587 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4tdq\" (UniqueName: \"kubernetes.io/projected/7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b-kube-api-access-j4tdq\") pod \"designate-operator-controller-manager-84f4f7b77b-tkdbz\" (UID: \"7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b\") " pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.518677 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpjh7\" (UniqueName: \"kubernetes.io/projected/91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8-kube-api-access-lpjh7\") pod \"horizon-operator-controller-manager-9f4696d94-8jqzd\" (UID: \"91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8\") " pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.518719 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rw5h\" (UniqueName: \"kubernetes.io/projected/ef92660c-59b4-4bf2-ae84-1873db0c94b2-kube-api-access-5rw5h\") pod \"glance-operator-controller-manager-84958c4d49-r6vlr\" (UID: \"ef92660c-59b4-4bf2-ae84-1873db0c94b2\") " pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.518740 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lx8m\" (UniqueName: \"kubernetes.io/projected/04b289df-a81e-43b7-8aa1-66c50deeccf6-kube-api-access-8lx8m\") pod \"cinder-operator-controller-manager-644bddb6d8-45xpz\" (UID: \"04b289df-a81e-43b7-8aa1-66c50deeccf6\") " pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.518781 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzqd4\" (UniqueName: \"kubernetes.io/projected/e8be5e01-df2e-4b18-a8bd-9b48b962f487-kube-api-access-hzqd4\") pod \"heat-operator-controller-manager-5d889d78cf-hgrdk\" (UID: \"e8be5e01-df2e-4b18-a8bd-9b48b962f487\") " pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.521811 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.528608 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.528829 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-ttpf9"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.539343 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-5hxhh"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.545564 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.554395 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.558601 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4tdq\" (UniqueName: \"kubernetes.io/projected/7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b-kube-api-access-j4tdq\") pod \"designate-operator-controller-manager-84f4f7b77b-tkdbz\" (UID: \"7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b\") " pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.564552 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rw5h\" (UniqueName: \"kubernetes.io/projected/ef92660c-59b4-4bf2-ae84-1873db0c94b2-kube-api-access-5rw5h\") pod \"glance-operator-controller-manager-84958c4d49-r6vlr\" (UID: \"ef92660c-59b4-4bf2-ae84-1873db0c94b2\") " pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.564624 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.594057 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.595255 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lx8m\" (UniqueName: \"kubernetes.io/projected/04b289df-a81e-43b7-8aa1-66c50deeccf6-kube-api-access-8lx8m\") pod \"cinder-operator-controller-manager-644bddb6d8-45xpz\" (UID: \"04b289df-a81e-43b7-8aa1-66c50deeccf6\") " pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.596339 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.604989 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.606083 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.607882 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-cbhnt"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.611516 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.611817 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.618079 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.621576 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzqd4\" (UniqueName: \"kubernetes.io/projected/e8be5e01-df2e-4b18-a8bd-9b48b962f487-kube-api-access-hzqd4\") pod \"heat-operator-controller-manager-5d889d78cf-hgrdk\" (UID: \"e8be5e01-df2e-4b18-a8bd-9b48b962f487\") " pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.621612 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8682f68d-8f1a-40c2-a06f-412cf86e26db-cert\") pod \"infra-operator-controller-manager-7d857cc749-d4c9x\" (UID: \"8682f68d-8f1a-40c2-a06f-412cf86e26db\") " pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.621648 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzpff\" (UniqueName: \"kubernetes.io/projected/8682f68d-8f1a-40c2-a06f-412cf86e26db-kube-api-access-hzpff\") pod \"infra-operator-controller-manager-7d857cc749-d4c9x\" (UID: \"8682f68d-8f1a-40c2-a06f-412cf86e26db\") " pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.621682 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d55j9\" (UniqueName: \"kubernetes.io/projected/d4069512-9cf0-4fd3-839a-4afc857dec61-kube-api-access-d55j9\") pod \"keystone-operator-controller-manager-5bd55b4bff-bk24d\" (UID: \"d4069512-9cf0-4fd3-839a-4afc857dec61\") " pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.621725 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpjh7\" (UniqueName: \"kubernetes.io/projected/91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8-kube-api-access-lpjh7\") pod \"horizon-operator-controller-manager-9f4696d94-8jqzd\" (UID: \"91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8\") " pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.621748 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n2h5\" (UniqueName: \"kubernetes.io/projected/609a391b-24d6-41f8-ad04-2c0a6e35de6b-kube-api-access-6n2h5\") pod \"ironic-operator-controller-manager-7975b88857-6gvjh\" (UID: \"609a391b-24d6-41f8-ad04-2c0a6e35de6b\") " pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.646864 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-hcv7g"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.651520 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.659472 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.660724 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.670573 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpjh7\" (UniqueName: \"kubernetes.io/projected/91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8-kube-api-access-lpjh7\") pod \"horizon-operator-controller-manager-9f4696d94-8jqzd\" (UID: \"91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8\") " pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.674620 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-qcwvv"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.675534 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzqd4\" (UniqueName: \"kubernetes.io/projected/e8be5e01-df2e-4b18-a8bd-9b48b962f487-kube-api-access-hzqd4\") pod \"heat-operator-controller-manager-5d889d78cf-hgrdk\" (UID: \"e8be5e01-df2e-4b18-a8bd-9b48b962f487\") " pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.677003 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.719214 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.720279 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.723511 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzpff\" (UniqueName: \"kubernetes.io/projected/8682f68d-8f1a-40c2-a06f-412cf86e26db-kube-api-access-hzpff\") pod \"infra-operator-controller-manager-7d857cc749-d4c9x\" (UID: \"8682f68d-8f1a-40c2-a06f-412cf86e26db\") " pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.723592 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d55j9\" (UniqueName: \"kubernetes.io/projected/d4069512-9cf0-4fd3-839a-4afc857dec61-kube-api-access-d55j9\") pod \"keystone-operator-controller-manager-5bd55b4bff-bk24d\" (UID: \"d4069512-9cf0-4fd3-839a-4afc857dec61\") " pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.723637 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n2h5\" (UniqueName: \"kubernetes.io/projected/609a391b-24d6-41f8-ad04-2c0a6e35de6b-kube-api-access-6n2h5\") pod \"ironic-operator-controller-manager-7975b88857-6gvjh\" (UID: \"609a391b-24d6-41f8-ad04-2c0a6e35de6b\") " pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.726604 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6zsv\" (UniqueName: \"kubernetes.io/projected/ea4afb9c-f223-4571-8d05-a4ed581c8116-kube-api-access-f6zsv\") pod \"manila-operator-controller-manager-6d68dbc695-shjzt\" (UID: \"ea4afb9c-f223-4571-8d05-a4ed581c8116\") " pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.726767 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8682f68d-8f1a-40c2-a06f-412cf86e26db-cert\") pod \"infra-operator-controller-manager-7d857cc749-d4c9x\" (UID: \"8682f68d-8f1a-40c2-a06f-412cf86e26db\") " pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.726796 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdwls\" (UniqueName: \"kubernetes.io/projected/3707a523-c522-4172-9844-75e296641307-kube-api-access-zdwls\") pod \"mariadb-operator-controller-manager-88c7-2wcsq\" (UID: \"3707a523-c522-4172-9844-75e296641307\") " pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.730459 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-gwjqg"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.738495 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8682f68d-8f1a-40c2-a06f-412cf86e26db-cert\") pod \"infra-operator-controller-manager-7d857cc749-d4c9x\" (UID: \"8682f68d-8f1a-40c2-a06f-412cf86e26db\") " pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.742289 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.743284 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.743762 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.753348 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-ks5ft"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.760485 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.760724 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.766813 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.791100 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n2h5\" (UniqueName: \"kubernetes.io/projected/609a391b-24d6-41f8-ad04-2c0a6e35de6b-kube-api-access-6n2h5\") pod \"ironic-operator-controller-manager-7975b88857-6gvjh\" (UID: \"609a391b-24d6-41f8-ad04-2c0a6e35de6b\") " pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.795817 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d55j9\" (UniqueName: \"kubernetes.io/projected/d4069512-9cf0-4fd3-839a-4afc857dec61-kube-api-access-d55j9\") pod \"keystone-operator-controller-manager-5bd55b4bff-bk24d\" (UID: \"d4069512-9cf0-4fd3-839a-4afc857dec61\") " pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.801121 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.803019 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.819470 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-v5mt4"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.819659 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.825497 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.826656 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.827083 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzpff\" (UniqueName: \"kubernetes.io/projected/8682f68d-8f1a-40c2-a06f-412cf86e26db-kube-api-access-hzpff\") pod \"infra-operator-controller-manager-7d857cc749-d4c9x\" (UID: \"8682f68d-8f1a-40c2-a06f-412cf86e26db\") " pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.827520 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n7kn\" (UniqueName: \"kubernetes.io/projected/bd72fadd-ea9a-43ab-9817-83b6a33b60fb-kube-api-access-6n7kn\") pod \"neutron-operator-controller-manager-64d7b59854-v6kc9\" (UID: \"bd72fadd-ea9a-43ab-9817-83b6a33b60fb\") " pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.827554 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6zsv\" (UniqueName: \"kubernetes.io/projected/ea4afb9c-f223-4571-8d05-a4ed581c8116-kube-api-access-f6zsv\") pod \"manila-operator-controller-manager-6d68dbc695-shjzt\" (UID: \"ea4afb9c-f223-4571-8d05-a4ed581c8116\") " pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.827580 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8n5h\" (UniqueName: \"kubernetes.io/projected/211a9bb6-3ab5-47e6-92e4-32eb396dd4dc-kube-api-access-v8n5h\") pod \"nova-operator-controller-manager-c7c776c96-7kj24\" (UID: \"211a9bb6-3ab5-47e6-92e4-32eb396dd4dc\") " pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.827615 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdwls\" (UniqueName: \"kubernetes.io/projected/3707a523-c522-4172-9844-75e296641307-kube-api-access-zdwls\") pod \"mariadb-operator-controller-manager-88c7-2wcsq\" (UID: \"3707a523-c522-4172-9844-75e296641307\") " pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.827636 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjkwz\" (UniqueName: \"kubernetes.io/projected/ec885f64-c0b7-4541-826e-2405c7a8c4e6-kube-api-access-qjkwz\") pod \"octavia-operator-controller-manager-76fcc6dc7c-lnnkn\" (UID: \"ec885f64-c0b7-4541-826e-2405c7a8c4e6\") " pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.834277 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-8ljmd"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.835315 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.836771 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.839386 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-nhb5z"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.854035 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdwls\" (UniqueName: \"kubernetes.io/projected/3707a523-c522-4172-9844-75e296641307-kube-api-access-zdwls\") pod \"mariadb-operator-controller-manager-88c7-2wcsq\" (UID: \"3707a523-c522-4172-9844-75e296641307\") " pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.871121 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.872171 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6zsv\" (UniqueName: \"kubernetes.io/projected/ea4afb9c-f223-4571-8d05-a4ed581c8116-kube-api-access-f6zsv\") pod \"manila-operator-controller-manager-6d68dbc695-shjzt\" (UID: \"ea4afb9c-f223-4571-8d05-a4ed581c8116\") " pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.910878 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.928505 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.928747 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcmvv\" (UniqueName: \"kubernetes.io/projected/4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee-kube-api-access-fcmvv\") pod \"placement-operator-controller-manager-589c58c6c-52hvk\" (UID: \"4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee\") " pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.928885 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxkff\" (UniqueName: \"kubernetes.io/projected/d11ea82b-7b70-49df-a288-673cb6ee9e9a-kube-api-access-rxkff\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.929024 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n7kn\" (UniqueName: \"kubernetes.io/projected/bd72fadd-ea9a-43ab-9817-83b6a33b60fb-kube-api-access-6n7kn\") pod \"neutron-operator-controller-manager-64d7b59854-v6kc9\" (UID: \"bd72fadd-ea9a-43ab-9817-83b6a33b60fb\") " pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.929167 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8n5h\" (UniqueName: \"kubernetes.io/projected/211a9bb6-3ab5-47e6-92e4-32eb396dd4dc-kube-api-access-v8n5h\") pod \"nova-operator-controller-manager-c7c776c96-7kj24\" (UID: \"211a9bb6-3ab5-47e6-92e4-32eb396dd4dc\") " pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.929282 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjkwz\" (UniqueName: \"kubernetes.io/projected/ec885f64-c0b7-4541-826e-2405c7a8c4e6-kube-api-access-qjkwz\") pod \"octavia-operator-controller-manager-76fcc6dc7c-lnnkn\" (UID: \"ec885f64-c0b7-4541-826e-2405c7a8c4e6\") " pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.929387 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnnx6\" (UniqueName: \"kubernetes.io/projected/92b0d181-cc90-43f8-a3a4-86a9a65b4c73-kube-api-access-nnnx6\") pod \"ovn-operator-controller-manager-9976ff44c-wn6hs\" (UID: \"92b0d181-cc90-43f8-a3a4-86a9a65b4c73\") " pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.931062 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"]
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.971624 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.986047 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8n5h\" (UniqueName: \"kubernetes.io/projected/211a9bb6-3ab5-47e6-92e4-32eb396dd4dc-kube-api-access-v8n5h\") pod \"nova-operator-controller-manager-c7c776c96-7kj24\" (UID: \"211a9bb6-3ab5-47e6-92e4-32eb396dd4dc\") " pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"
Sep 30 17:14:29 crc kubenswrapper[4818]: I0930 17:14:29.986323 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjkwz\" (UniqueName: \"kubernetes.io/projected/ec885f64-c0b7-4541-826e-2405c7a8c4e6-kube-api-access-qjkwz\") pod \"octavia-operator-controller-manager-76fcc6dc7c-lnnkn\" (UID: \"ec885f64-c0b7-4541-826e-2405c7a8c4e6\") " pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.021026 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.023191 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.054739 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnnx6\" (UniqueName: \"kubernetes.io/projected/92b0d181-cc90-43f8-a3a4-86a9a65b4c73-kube-api-access-nnnx6\") pod \"ovn-operator-controller-manager-9976ff44c-wn6hs\" (UID: \"92b0d181-cc90-43f8-a3a4-86a9a65b4c73\") " pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.054826 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.054898 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcmvv\" (UniqueName: \"kubernetes.io/projected/4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee-kube-api-access-fcmvv\") pod \"placement-operator-controller-manager-589c58c6c-52hvk\" (UID: \"4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee\") " pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.055003 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxkff\" (UniqueName: \"kubernetes.io/projected/d11ea82b-7b70-49df-a288-673cb6ee9e9a-kube-api-access-rxkff\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"
Sep 30 17:14:30 crc kubenswrapper[4818]: E0930 17:14:30.055465 4818 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Sep 30 17:14:30 crc kubenswrapper[4818]: E0930 17:14:30.055511 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert podName:d11ea82b-7b70-49df-a288-673cb6ee9e9a nodeName:}" failed. No retries permitted until 2025-09-30 17:14:30.555497186 +0000 UTC m=+917.309769002 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert") pod "openstack-baremetal-operator-controller-manager-6d776955-2x6qj" (UID: "d11ea82b-7b70-49df-a288-673cb6ee9e9a") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.055461 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n7kn\" (UniqueName: \"kubernetes.io/projected/bd72fadd-ea9a-43ab-9817-83b6a33b60fb-kube-api-access-6n7kn\") pod \"neutron-operator-controller-manager-64d7b59854-v6kc9\" (UID: \"bd72fadd-ea9a-43ab-9817-83b6a33b60fb\") " pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.056293 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.078083 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxkff\" (UniqueName: \"kubernetes.io/projected/d11ea82b-7b70-49df-a288-673cb6ee9e9a-kube-api-access-rxkff\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.085479 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcmvv\" (UniqueName: \"kubernetes.io/projected/4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee-kube-api-access-fcmvv\") pod \"placement-operator-controller-manager-589c58c6c-52hvk\" (UID: \"4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee\") " pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.087669 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.092680 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.095831 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.096833 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.097587 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.098861 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.098947 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.100586 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.101180 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.102148 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-vx8st"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.102891 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-hdwrz"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.103236 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-m8dxf"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.106496 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnnx6\" (UniqueName: \"kubernetes.io/projected/92b0d181-cc90-43f8-a3a4-86a9a65b4c73-kube-api-access-nnnx6\") pod \"ovn-operator-controller-manager-9976ff44c-wn6hs\" (UID: \"92b0d181-cc90-43f8-a3a4-86a9a65b4c73\") " pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.112886 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.128447 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.148772 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.186530 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.194286 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.213241 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.220277 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc"]
Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.221575 4818 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.230149 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-2v76c" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.254128 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc"] Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.267654 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp78d\" (UniqueName: \"kubernetes.io/projected/ac78c97f-d06d-4817-aba8-145f7ec5c3ee-kube-api-access-jp78d\") pod \"telemetry-operator-controller-manager-b8d54b5d7-vn27d\" (UID: \"ac78c97f-d06d-4817-aba8-145f7ec5c3ee\") " pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.267715 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcvnw\" (UniqueName: \"kubernetes.io/projected/02f33697-4faf-4e8d-8d78-77a6e8ad7d72-kube-api-access-pcvnw\") pod \"swift-operator-controller-manager-bc7dc7bd9-jl7hb\" (UID: \"02f33697-4faf-4e8d-8d78-77a6e8ad7d72\") " pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.267780 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwdkw\" (UniqueName: \"kubernetes.io/projected/43743bf4-0c17-4266-b31d-a17cf0e6d330-kube-api-access-fwdkw\") pod \"test-operator-controller-manager-f66b554c6-lcj9j\" (UID: \"43743bf4-0c17-4266-b31d-a17cf0e6d330\") " pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.326287 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl"] Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.327461 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.333977 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.334279 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ctrct" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.335988 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl"] Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.357235 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t"] Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.361606 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.363241 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-p96kt" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.368790 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t"] Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.369240 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6t5b\" (UniqueName: \"kubernetes.io/projected/d94046f7-ca49-45f5-970e-e85346bbc77b-kube-api-access-t6t5b\") pod \"watcher-operator-controller-manager-ff74f75dc-kjsvc\" (UID: \"d94046f7-ca49-45f5-970e-e85346bbc77b\") " pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.371258 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp78d\" (UniqueName: \"kubernetes.io/projected/ac78c97f-d06d-4817-aba8-145f7ec5c3ee-kube-api-access-jp78d\") pod \"telemetry-operator-controller-manager-b8d54b5d7-vn27d\" (UID: \"ac78c97f-d06d-4817-aba8-145f7ec5c3ee\") " pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.371305 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcvnw\" (UniqueName: \"kubernetes.io/projected/02f33697-4faf-4e8d-8d78-77a6e8ad7d72-kube-api-access-pcvnw\") pod \"swift-operator-controller-manager-bc7dc7bd9-jl7hb\" (UID: \"02f33697-4faf-4e8d-8d78-77a6e8ad7d72\") " pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.371354 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwdkw\" (UniqueName: \"kubernetes.io/projected/43743bf4-0c17-4266-b31d-a17cf0e6d330-kube-api-access-fwdkw\") pod \"test-operator-controller-manager-f66b554c6-lcj9j\" (UID: \"43743bf4-0c17-4266-b31d-a17cf0e6d330\") " pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.396389 4818 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.402877 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.408714 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcvnw\" (UniqueName: \"kubernetes.io/projected/02f33697-4faf-4e8d-8d78-77a6e8ad7d72-kube-api-access-pcvnw\") pod \"swift-operator-controller-manager-bc7dc7bd9-jl7hb\" (UID: \"02f33697-4faf-4e8d-8d78-77a6e8ad7d72\") " pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.412025 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwdkw\" (UniqueName: \"kubernetes.io/projected/43743bf4-0c17-4266-b31d-a17cf0e6d330-kube-api-access-fwdkw\") pod \"test-operator-controller-manager-f66b554c6-lcj9j\" (UID: \"43743bf4-0c17-4266-b31d-a17cf0e6d330\") " pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.416443 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp78d\" (UniqueName: \"kubernetes.io/projected/ac78c97f-d06d-4817-aba8-145f7ec5c3ee-kube-api-access-jp78d\") pod \"telemetry-operator-controller-manager-b8d54b5d7-vn27d\" (UID: \"ac78c97f-d06d-4817-aba8-145f7ec5c3ee\") " pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.422798 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w"] Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.430472 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.451518 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.467277 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.472712 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqskm\" (UniqueName: \"kubernetes.io/projected/347237b8-ee53-432a-932b-d7e2488b253f-kube-api-access-wqskm\") pod \"rabbitmq-cluster-operator-manager-79d8469568-dq42t\" (UID: \"347237b8-ee53-432a-932b-d7e2488b253f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.472896 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.473008 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6t5b\" (UniqueName: \"kubernetes.io/projected/d94046f7-ca49-45f5-970e-e85346bbc77b-kube-api-access-t6t5b\") pod \"watcher-operator-controller-manager-ff74f75dc-kjsvc\" (UID: \"d94046f7-ca49-45f5-970e-e85346bbc77b\") " pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.473132 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xxk2\" (UniqueName: \"kubernetes.io/projected/884fad81-43e8-4b9c-b517-83d24d16f9cd-kube-api-access-2xxk2\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.487015 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz"] Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.503446 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6t5b\" (UniqueName: \"kubernetes.io/projected/d94046f7-ca49-45f5-970e-e85346bbc77b-kube-api-access-t6t5b\") pod \"watcher-operator-controller-manager-ff74f75dc-kjsvc\" (UID: \"d94046f7-ca49-45f5-970e-e85346bbc77b\") " pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.558454 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.576759 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqskm\" (UniqueName: \"kubernetes.io/projected/347237b8-ee53-432a-932b-d7e2488b253f-kube-api-access-wqskm\") pod \"rabbitmq-cluster-operator-manager-79d8469568-dq42t\" (UID: \"347237b8-ee53-432a-932b-d7e2488b253f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.576830 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.576860 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.576912 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xxk2\" (UniqueName: \"kubernetes.io/projected/884fad81-43e8-4b9c-b517-83d24d16f9cd-kube-api-access-2xxk2\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:30 crc kubenswrapper[4818]: E0930 17:14:30.577493 4818 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Sep 30 17:14:30 crc kubenswrapper[4818]: E0930 17:14:30.577564 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert podName:d11ea82b-7b70-49df-a288-673cb6ee9e9a nodeName:}" failed. No retries permitted until 2025-09-30 17:14:31.577522327 +0000 UTC m=+918.331794143 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert") pod "openstack-baremetal-operator-controller-manager-6d776955-2x6qj" (UID: "d11ea82b-7b70-49df-a288-673cb6ee9e9a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Sep 30 17:14:30 crc kubenswrapper[4818]: E0930 17:14:30.577821 4818 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Sep 30 17:14:30 crc kubenswrapper[4818]: E0930 17:14:30.577856 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert podName:884fad81-43e8-4b9c-b517-83d24d16f9cd nodeName:}" failed. No retries permitted until 2025-09-30 17:14:31.077847746 +0000 UTC m=+917.832119562 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert") pod "openstack-operator-controller-manager-b7d9776bd-qhghl" (UID: "884fad81-43e8-4b9c-b517-83d24d16f9cd") : secret "webhook-server-cert" not found Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.610518 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqskm\" (UniqueName: \"kubernetes.io/projected/347237b8-ee53-432a-932b-d7e2488b253f-kube-api-access-wqskm\") pod \"rabbitmq-cluster-operator-manager-79d8469568-dq42t\" (UID: \"347237b8-ee53-432a-932b-d7e2488b253f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.633684 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xxk2\" (UniqueName: \"kubernetes.io/projected/884fad81-43e8-4b9c-b517-83d24d16f9cd-kube-api-access-2xxk2\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.739464 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.842615 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz" event={"ID":"7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b","Type":"ContainerStarted","Data":"95cf1667c0d01b3e81438c644cb0deb163e54fd58b271e6c4bf5726f5f80ddcc"} Sep 30 17:14:30 crc kubenswrapper[4818]: I0930 17:14:30.844503 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w" event={"ID":"38a968a2-8f4f-4389-8ee1-852f92ffcb4b","Type":"ContainerStarted","Data":"e6db4c8915cf103621672129dd3607156af66b688e7e9fc5d982f0b1dc44dcdd"} Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.086526 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:31 crc kubenswrapper[4818]: E0930 17:14:31.086766 4818 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Sep 30 17:14:31 crc kubenswrapper[4818]: E0930 17:14:31.086812 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert podName:884fad81-43e8-4b9c-b517-83d24d16f9cd nodeName:}" failed. No retries permitted until 2025-09-30 17:14:32.086797835 +0000 UTC m=+918.841069651 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert") pod "openstack-operator-controller-manager-b7d9776bd-qhghl" (UID: "884fad81-43e8-4b9c-b517-83d24d16f9cd") : secret "webhook-server-cert" not found Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.551164 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr"] Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.568271 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd"] Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.599714 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.610402 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d11ea82b-7b70-49df-a288-673cb6ee9e9a-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-2x6qj\" (UID: \"d11ea82b-7b70-49df-a288-673cb6ee9e9a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.635918 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk"] Sep 30 17:14:31 crc kubenswrapper[4818]: W0930 17:14:31.644121 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8be5e01_df2e_4b18_a8bd_9b48b962f487.slice/crio-9d979e6acb4691ed68e0c90071be4e735c4d46cbb158572413550ab88e5a780a WatchSource:0}: Error finding container 9d979e6acb4691ed68e0c90071be4e735c4d46cbb158572413550ab88e5a780a: Status 404 returned error can't find the container with id 9d979e6acb4691ed68e0c90071be4e735c4d46cbb158572413550ab88e5a780a Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.738949 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.864116 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk" event={"ID":"e8be5e01-df2e-4b18-a8bd-9b48b962f487","Type":"ContainerStarted","Data":"9d979e6acb4691ed68e0c90071be4e735c4d46cbb158572413550ab88e5a780a"} Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.872895 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd" event={"ID":"91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8","Type":"ContainerStarted","Data":"e67bb18fa7bf7d037951eb60e884c85cd13fcba3c224d71178ea4069dff1c893"} Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.884178 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr" event={"ID":"ef92660c-59b4-4bf2-ae84-1873db0c94b2","Type":"ContainerStarted","Data":"e003e7d4f28bdf856bc23a813e8f114b3c9e5456f02119ff10bab6019798faaa"} Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.884806 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz"] Sep 30 17:14:31 crc kubenswrapper[4818]: W0930 17:14:31.894159 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04b289df_a81e_43b7_8aa1_66c50deeccf6.slice/crio-b7edb3005053599f8b1743714692ff9dea548146e2e2dc371c2c991cc0cf517f WatchSource:0}: Error finding container b7edb3005053599f8b1743714692ff9dea548146e2e2dc371c2c991cc0cf517f: Status 404 returned error can't find the container with id b7edb3005053599f8b1743714692ff9dea548146e2e2dc371c2c991cc0cf517f Sep 30 17:14:31 crc kubenswrapper[4818]: W0930 17:14:31.920185 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b6ffd9b_6e0e_44f3_bce2_19c1fdf8d0ee.slice/crio-bd4e4721720c1ed708a6f31c97d6f55890a889a6d12fb69d8d0faa1bd2cb33cb WatchSource:0}: Error finding container bd4e4721720c1ed708a6f31c97d6f55890a889a6d12fb69d8d0faa1bd2cb33cb: Status 404 returned error can't find the container with id bd4e4721720c1ed708a6f31c97d6f55890a889a6d12fb69d8d0faa1bd2cb33cb Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.920230 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk"] Sep 30 17:14:31 crc kubenswrapper[4818]: W0930 17:14:31.934569 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8682f68d_8f1a_40c2_a06f_412cf86e26db.slice/crio-70c66c3026d72706af6448de4eb7a039fcf0d810b13f62a0a1ae7cf7badc142a WatchSource:0}: Error finding container 70c66c3026d72706af6448de4eb7a039fcf0d810b13f62a0a1ae7cf7badc142a: Status 404 returned error can't find the container with id 70c66c3026d72706af6448de4eb7a039fcf0d810b13f62a0a1ae7cf7badc142a Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.947236 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d"] Sep 30 17:14:31 crc kubenswrapper[4818]: W0930 17:14:31.953999 4818 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea4afb9c_f223_4571_8d05_a4ed581c8116.slice/crio-c707c0974b9734e39582f948ea2b22680ec087026ca4c3c32a2716f9c56ec37b WatchSource:0}: Error finding container c707c0974b9734e39582f948ea2b22680ec087026ca4c3c32a2716f9c56ec37b: Status 404 returned error can't find the container with id c707c0974b9734e39582f948ea2b22680ec087026ca4c3c32a2716f9c56ec37b Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.957494 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x"] Sep 30 17:14:31 crc kubenswrapper[4818]: E0930 17:14:31.969096 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:e6f1ed6b386f77415c2a44e770d98ab6d16b6f6b494c4d1b4ac4b46368c4a4e6,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6n2h5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-7975b88857-6gvjh_openstack-operators(609a391b-24d6-41f8-ad04-2c0a6e35de6b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 30 17:14:31 crc kubenswrapper[4818]: I0930 17:14:31.986248 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24"] Sep 30 17:14:31 crc kubenswrapper[4818]: E0930 17:14:31.996005 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:4d08afd31dc5ded10c54a5541f514ac351e9b40a183285b3db27d0757a6354c8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qjkwz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-76fcc6dc7c-lnnkn_openstack-operators(ec885f64-c0b7-4541-826e-2405c7a8c4e6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.019831 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh"] Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.032072 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq"] Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.039739 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs"] Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.047011 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt"] Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.049809 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9"] Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.056138 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn"] Sep 30 17:14:32 
crc kubenswrapper[4818]: I0930 17:14:32.109730 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.116547 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/884fad81-43e8-4b9c-b517-83d24d16f9cd-cert\") pod \"openstack-operator-controller-manager-b7d9776bd-qhghl\" (UID: \"884fad81-43e8-4b9c-b517-83d24d16f9cd\") " pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.142330 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" podUID="609a391b-24d6-41f8-ad04-2c0a6e35de6b" Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.143090 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" podUID="ec885f64-c0b7-4541-826e-2405c7a8c4e6" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.193418 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t"] Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.200321 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d"] Sep 30 17:14:32 crc kubenswrapper[4818]: W0930 17:14:32.205009 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac78c97f_d06d_4817_aba8_145f7ec5c3ee.slice/crio-ffb44fdc804a05f258fcbc36a8a9934993a85c7ef70619f133a26bcf5a0b3439 WatchSource:0}: Error finding container ffb44fdc804a05f258fcbc36a8a9934993a85c7ef70619f133a26bcf5a0b3439: Status 404 returned error can't find the container with id ffb44fdc804a05f258fcbc36a8a9934993a85c7ef70619f133a26bcf5a0b3439 Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.212735 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j"] Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.221707 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.180:5001/openstack-k8s-operators/watcher-operator:12fd3bd8e05dde3568e99748b447d9fecced5ab5,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t6t5b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-ff74f75dc-kjsvc_openstack-operators(d94046f7-ca49-45f5-970e-e85346bbc77b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.223023 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:a303e460aec09217f90043b8ff19c01061af003b614833b33a593df9c00ddf80,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fwdkw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-f66b554c6-lcj9j_openstack-operators(43743bf4-0c17-4266-b31d-a17cf0e6d330): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.223780 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc"] Sep 30 17:14:32 crc kubenswrapper[4818]: W0930 17:14:32.224839 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod347237b8_ee53_432a_932b_d7e2488b253f.slice/crio-a1003a811b08094b06b7ca3641c66d9daabea097fb5eb99eb2e6d448a25d0bc5 WatchSource:0}: Error finding container a1003a811b08094b06b7ca3641c66d9daabea097fb5eb99eb2e6d448a25d0bc5: Status 404 returned error can't find the container with id a1003a811b08094b06b7ca3641c66d9daabea097fb5eb99eb2e6d448a25d0bc5 Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.226434 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:225524223bf2a7f3a4ce95958fc9ca6fdab02745fb70374e8ff5bf1ddaceda4b,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wqskm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-79d8469568-dq42t_openstack-operators(347237b8-ee53-432a-932b-d7e2488b253f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.227613 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.227657 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb"] Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.228304 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" podUID="347237b8-ee53-432a-932b-d7e2488b253f" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.252963 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj"] Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.272450 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3c6f7d737e0196ec302f44354228d783ad3b210a75703dda3b39c15c01a67e8c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pcvnw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-bc7dc7bd9-jl7hb_openstack-operators(02f33697-4faf-4e8d-8d78-77a6e8ad7d72): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.318368 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:e3f947e9034a951620a76eaf41ceec95eefcef0eacb251b10993d6820d5e1af6,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/pod
ified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRO
N_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podif
ied,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_LIGHTSPEED_IMAGE_URL_DEFAULT,Value:quay.io/openstack-lightspeed/rag-content:os-docs-2024.2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:curre
nt-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rxkff,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-6d776955-2x6qj_openstack-operators(d11ea82b-7b70-49df-a288-673cb6ee9e9a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.622419 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" 
pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" podUID="43743bf4-0c17-4266-b31d-a17cf0e6d330" Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.717105 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" podUID="02f33697-4faf-4e8d-8d78-77a6e8ad7d72" Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.752134 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.835602 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl"] Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.859707 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" podUID="d11ea82b-7b70-49df-a288-673cb6ee9e9a" Sep 30 17:14:32 crc kubenswrapper[4818]: W0930 17:14:32.889626 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod884fad81_43e8_4b9c_b517_83d24d16f9cd.slice/crio-4f6527099f2512c3e54ea5852d1aee7a2188e6fb7ee841cdc7f6e848228113a0 WatchSource:0}: Error finding container 4f6527099f2512c3e54ea5852d1aee7a2188e6fb7ee841cdc7f6e848228113a0: Status 404 returned error can't find the container with id 4f6527099f2512c3e54ea5852d1aee7a2188e6fb7ee841cdc7f6e848228113a0 Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.904868 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt" event={"ID":"ea4afb9c-f223-4571-8d05-a4ed581c8116","Type":"ContainerStarted","Data":"c707c0974b9734e39582f948ea2b22680ec087026ca4c3c32a2716f9c56ec37b"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.915671 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk" event={"ID":"4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee","Type":"ContainerStarted","Data":"bd4e4721720c1ed708a6f31c97d6f55890a889a6d12fb69d8d0faa1bd2cb33cb"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.917568 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs" event={"ID":"92b0d181-cc90-43f8-a3a4-86a9a65b4c73","Type":"ContainerStarted","Data":"88e7dae9c4507a15b7c25663181e01060d0a8d9626b63df4d72331990481dd32"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.919924 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" event={"ID":"43743bf4-0c17-4266-b31d-a17cf0e6d330","Type":"ContainerStarted","Data":"736bfb63eeb7ea4494ff5e81d644e414797f78a1d556769048bdeba1ce8b9a64"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.920811 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" 
event={"ID":"43743bf4-0c17-4266-b31d-a17cf0e6d330","Type":"ContainerStarted","Data":"a25d6ac4f1ac695ddf77873c84bf28d5b1de9dc53a87fc59bdc5be0994eadfe2"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.924584 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d" event={"ID":"d4069512-9cf0-4fd3-839a-4afc857dec61","Type":"ContainerStarted","Data":"0a28613ba4c9e74d40c1f223f85094186630cb788e10926da2b1f583e533f4a8"} Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.929314 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:a303e460aec09217f90043b8ff19c01061af003b614833b33a593df9c00ddf80\\\"\"" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" podUID="43743bf4-0c17-4266-b31d-a17cf0e6d330" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.952232 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" event={"ID":"02f33697-4faf-4e8d-8d78-77a6e8ad7d72","Type":"ContainerStarted","Data":"4d691a626aa1d0ffc8059ca6d35fa1df3e3b02573f17b011bf91d22599e0a5b8"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.952283 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" event={"ID":"02f33697-4faf-4e8d-8d78-77a6e8ad7d72","Type":"ContainerStarted","Data":"cdf1dd2e5cdd3dd7d9abcffd068b007d4d9ff0af29d4dd4285a42cd8101c9cb8"} Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.966964 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3c6f7d737e0196ec302f44354228d783ad3b210a75703dda3b39c15c01a67e8c\\\"\"" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" podUID="02f33697-4faf-4e8d-8d78-77a6e8ad7d72" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.981592 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" event={"ID":"d11ea82b-7b70-49df-a288-673cb6ee9e9a","Type":"ContainerStarted","Data":"957b8d5b9742125a288ad1e852918c87dde086bbfc65fea01a43445ceec68be5"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.981645 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" event={"ID":"d11ea82b-7b70-49df-a288-673cb6ee9e9a","Type":"ContainerStarted","Data":"0e6cfcefe1fdbb87cff464cada008d6eb30f854b8f7769918bbaae43d5f0a9d1"} Sep 30 17:14:32 crc kubenswrapper[4818]: E0930 17:14:32.986008 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:e3f947e9034a951620a76eaf41ceec95eefcef0eacb251b10993d6820d5e1af6\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" podUID="d11ea82b-7b70-49df-a288-673cb6ee9e9a" Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.990878 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz" 
event={"ID":"04b289df-a81e-43b7-8aa1-66c50deeccf6","Type":"ContainerStarted","Data":"b7edb3005053599f8b1743714692ff9dea548146e2e2dc371c2c991cc0cf517f"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.993138 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24" event={"ID":"211a9bb6-3ab5-47e6-92e4-32eb396dd4dc","Type":"ContainerStarted","Data":"c2d88294e7a5a448a8107ad310020d09428307fe3b95691d5293ab1da57839fe"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.994958 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" event={"ID":"609a391b-24d6-41f8-ad04-2c0a6e35de6b","Type":"ContainerStarted","Data":"11e47400e7798cd307c2ef132de8ff1f066b73240d1dc75efdc96445ef81142c"} Sep 30 17:14:32 crc kubenswrapper[4818]: I0930 17:14:32.995007 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" event={"ID":"609a391b-24d6-41f8-ad04-2c0a6e35de6b","Type":"ContainerStarted","Data":"da4c0b63fd2760eb004d8ca126eca9168561f937433d8de2d5083b79fa3d91b8"} Sep 30 17:14:33 crc kubenswrapper[4818]: E0930 17:14:33.001223 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:e6f1ed6b386f77415c2a44e770d98ab6d16b6f6b494c4d1b4ac4b46368c4a4e6\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" podUID="609a391b-24d6-41f8-ad04-2c0a6e35de6b" Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.001540 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" event={"ID":"d94046f7-ca49-45f5-970e-e85346bbc77b","Type":"ContainerStarted","Data":"db15922ac3aecf3d6a56164794eed95b70b698b9ae738d71f27450bde7c9b7c1"} Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.001566 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" event={"ID":"d94046f7-ca49-45f5-970e-e85346bbc77b","Type":"ContainerStarted","Data":"d16c77cd1365891bd6deeadecb41502a193a27570d2f903a5893978efbebd888"} Sep 30 17:14:33 crc kubenswrapper[4818]: E0930 17:14:33.003305 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.180:5001/openstack-k8s-operators/watcher-operator:12fd3bd8e05dde3568e99748b447d9fecced5ab5\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.004133 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" event={"ID":"347237b8-ee53-432a-932b-d7e2488b253f","Type":"ContainerStarted","Data":"a1003a811b08094b06b7ca3641c66d9daabea097fb5eb99eb2e6d448a25d0bc5"} Sep 30 17:14:33 crc kubenswrapper[4818]: E0930 17:14:33.005504 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:225524223bf2a7f3a4ce95958fc9ca6fdab02745fb70374e8ff5bf1ddaceda4b\\\"\"" 
pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" podUID="347237b8-ee53-432a-932b-d7e2488b253f" Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.010372 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" event={"ID":"ac78c97f-d06d-4817-aba8-145f7ec5c3ee","Type":"ContainerStarted","Data":"ffb44fdc804a05f258fcbc36a8a9934993a85c7ef70619f133a26bcf5a0b3439"} Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.018324 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" event={"ID":"ec885f64-c0b7-4541-826e-2405c7a8c4e6","Type":"ContainerStarted","Data":"60843814fc1898e2eb7fa9676e85fb6e4b8d6949af76d1709c9afbeca1b0d43e"} Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.018383 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" event={"ID":"ec885f64-c0b7-4541-826e-2405c7a8c4e6","Type":"ContainerStarted","Data":"2711d11c82bfcbdd0f9d947cb8c50fc93ea2cd7965e14f2d5b2dece3a4a6284f"} Sep 30 17:14:33 crc kubenswrapper[4818]: E0930 17:14:33.019795 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:4d08afd31dc5ded10c54a5541f514ac351e9b40a183285b3db27d0757a6354c8\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" podUID="ec885f64-c0b7-4541-826e-2405c7a8c4e6" Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.022976 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9" event={"ID":"bd72fadd-ea9a-43ab-9817-83b6a33b60fb","Type":"ContainerStarted","Data":"1c8502b42d71b0895a8d3d8268afe14232c6d438590a059e15b25ba596659c4d"} Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.027537 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq" event={"ID":"3707a523-c522-4172-9844-75e296641307","Type":"ContainerStarted","Data":"45477e2752df316279355bfc1c60dce50cdea6d47b381b77e750c06fa239539e"} Sep 30 17:14:33 crc kubenswrapper[4818]: I0930 17:14:33.043721 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x" event={"ID":"8682f68d-8f1a-40c2-a06f-412cf86e26db","Type":"ContainerStarted","Data":"70c66c3026d72706af6448de4eb7a039fcf0d810b13f62a0a1ae7cf7badc142a"} Sep 30 17:14:34 crc kubenswrapper[4818]: I0930 17:14:34.074124 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" event={"ID":"884fad81-43e8-4b9c-b517-83d24d16f9cd","Type":"ContainerStarted","Data":"b75848a77ce1d54b29264364f8f1e374b957dbbe6deb774753ba6637798ed32a"} Sep 30 17:14:34 crc kubenswrapper[4818]: I0930 17:14:34.074378 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" event={"ID":"884fad81-43e8-4b9c-b517-83d24d16f9cd","Type":"ContainerStarted","Data":"878648735bdac2aa31baab5483dd2c58161ad4c7fc99230bde651fc7dc6aeb05"} Sep 30 17:14:34 crc kubenswrapper[4818]: I0930 17:14:34.074389 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" event={"ID":"884fad81-43e8-4b9c-b517-83d24d16f9cd","Type":"ContainerStarted","Data":"4f6527099f2512c3e54ea5852d1aee7a2188e6fb7ee841cdc7f6e848228113a0"} Sep 30 17:14:34 crc kubenswrapper[4818]: E0930 17:14:34.076034 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:a303e460aec09217f90043b8ff19c01061af003b614833b33a593df9c00ddf80\\\"\"" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" podUID="43743bf4-0c17-4266-b31d-a17cf0e6d330" Sep 30 17:14:34 crc kubenswrapper[4818]: E0930 17:14:34.076631 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:225524223bf2a7f3a4ce95958fc9ca6fdab02745fb70374e8ff5bf1ddaceda4b\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" podUID="347237b8-ee53-432a-932b-d7e2488b253f" Sep 30 17:14:34 crc kubenswrapper[4818]: E0930 17:14:34.076718 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:4d08afd31dc5ded10c54a5541f514ac351e9b40a183285b3db27d0757a6354c8\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" podUID="ec885f64-c0b7-4541-826e-2405c7a8c4e6" Sep 30 17:14:34 crc kubenswrapper[4818]: E0930 17:14:34.076778 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.180:5001/openstack-k8s-operators/watcher-operator:12fd3bd8e05dde3568e99748b447d9fecced5ab5\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" Sep 30 17:14:34 crc kubenswrapper[4818]: E0930 17:14:34.076976 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:e3f947e9034a951620a76eaf41ceec95eefcef0eacb251b10993d6820d5e1af6\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" podUID="d11ea82b-7b70-49df-a288-673cb6ee9e9a" Sep 30 17:14:34 crc kubenswrapper[4818]: E0930 17:14:34.077523 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3c6f7d737e0196ec302f44354228d783ad3b210a75703dda3b39c15c01a67e8c\\\"\"" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" podUID="02f33697-4faf-4e8d-8d78-77a6e8ad7d72" Sep 30 17:14:34 crc kubenswrapper[4818]: E0930 17:14:34.078582 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:e6f1ed6b386f77415c2a44e770d98ab6d16b6f6b494c4d1b4ac4b46368c4a4e6\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" podUID="609a391b-24d6-41f8-ad04-2c0a6e35de6b" Sep 30 
17:14:34 crc kubenswrapper[4818]: I0930 17:14:34.569979 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" podStartSLOduration=4.569964221 podStartE2EDuration="4.569964221s" podCreationTimestamp="2025-09-30 17:14:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:14:34.546449488 +0000 UTC m=+921.300721304" watchObservedRunningTime="2025-09-30 17:14:34.569964221 +0000 UTC m=+921.324236037" Sep 30 17:14:35 crc kubenswrapper[4818]: I0930 17:14:35.086472 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:42 crc kubenswrapper[4818]: I0930 17:14:42.236276 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-b7d9776bd-qhghl" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.168062 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9" event={"ID":"bd72fadd-ea9a-43ab-9817-83b6a33b60fb","Type":"ContainerStarted","Data":"3ebb1a1f797d307ee50062e5261cd8bf3878fb6ab76602b05b56a6ba2440642e"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.194675 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz" event={"ID":"04b289df-a81e-43b7-8aa1-66c50deeccf6","Type":"ContainerStarted","Data":"383bb38b4b3f70debf2d345f340fbed41806b2b98a36ef1f347ea977999d114f"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.194716 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz" event={"ID":"04b289df-a81e-43b7-8aa1-66c50deeccf6","Type":"ContainerStarted","Data":"7d7532e769abc752f074dc4b91af52a825adf0cf6a6705284a8ff061ecd4337e"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.195672 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.213423 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w" event={"ID":"38a968a2-8f4f-4389-8ee1-852f92ffcb4b","Type":"ContainerStarted","Data":"68f9edc344d9be010ec8d6913f8dae9331b0d0280c41cd0a0b8dcf33fb809c39"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.213463 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w" event={"ID":"38a968a2-8f4f-4389-8ee1-852f92ffcb4b","Type":"ContainerStarted","Data":"7fa9c92f866b32526a541374d07361d9c2d000fca0b517253d303b5f349cb580"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.214060 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.238130 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr" event={"ID":"ef92660c-59b4-4bf2-ae84-1873db0c94b2","Type":"ContainerStarted","Data":"7aab8ca86577e2fda32f12592cb48bc55f364cfbd0d0cba51804972c888ff3bd"} Sep 30 
17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.266819 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz" podStartSLOduration=4.378801909 podStartE2EDuration="16.26680434s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.909037607 +0000 UTC m=+918.663309423" lastFinishedPulling="2025-09-30 17:14:43.797040038 +0000 UTC m=+930.551311854" observedRunningTime="2025-09-30 17:14:45.23820128 +0000 UTC m=+931.992473096" watchObservedRunningTime="2025-09-30 17:14:45.26680434 +0000 UTC m=+932.021076156" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.289125 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt" event={"ID":"ea4afb9c-f223-4571-8d05-a4ed581c8116","Type":"ContainerStarted","Data":"b8f925c6bea61f903042e34cfd211185e72834cad665c418296301f42ac28f46"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.289388 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w" podStartSLOduration=2.887930879 podStartE2EDuration="16.289378757s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:30.396154085 +0000 UTC m=+917.150425901" lastFinishedPulling="2025-09-30 17:14:43.797601953 +0000 UTC m=+930.551873779" observedRunningTime="2025-09-30 17:14:45.288872573 +0000 UTC m=+932.043144389" watchObservedRunningTime="2025-09-30 17:14:45.289378757 +0000 UTC m=+932.043650563" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.290203 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.326949 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24" event={"ID":"211a9bb6-3ab5-47e6-92e4-32eb396dd4dc","Type":"ContainerStarted","Data":"3a74f699eb2924bf2441288470724fcccfd3b408960cf5fd45bdd082a55d0996"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.344598 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs" event={"ID":"92b0d181-cc90-43f8-a3a4-86a9a65b4c73","Type":"ContainerStarted","Data":"4a6d0656c4fd5065d011b266a517b76015b27666e7df353878c21eb685fa3898"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.344589 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt" podStartSLOduration=4.473819337 podStartE2EDuration="16.344567502s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.95890261 +0000 UTC m=+918.713174416" lastFinishedPulling="2025-09-30 17:14:43.829650765 +0000 UTC m=+930.583922581" observedRunningTime="2025-09-30 17:14:45.331654255 +0000 UTC m=+932.085926071" watchObservedRunningTime="2025-09-30 17:14:45.344567502 +0000 UTC m=+932.098839318" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.358831 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd" event={"ID":"91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8","Type":"ContainerStarted","Data":"3bd1919d4cfe63ddc72fe19193e7c7e45fa40564345359d8740182286323d0a9"} 
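[Annotation on the "Observed pod startup duration" records above and below: pod_startup_latency_tracker.go reports two figures per pod. podStartE2EDuration appears to be the wall time from podCreationTimestamp to observedRunningTime, while podStartSLOduration appears to be that same span minus the image-pull window (lastFinishedPulling minus firstStartedPulling), taken from the monotonic "m=+" clock readings; this is why pods delayed by the earlier "pull QPS exceeded" / ImagePullBackOff errors still show small SLO durations. Checking the barbican-operator record above: 16.289378757s - (930.551873779s - 917.150425901s) = 2.887930879s, which matches the logged podStartSLOduration exactly. A minimal Go sketch of that arithmetic, assuming this inferred relationship holds; sloDuration is a hypothetical reading aid, not kubelet source:

    package main

    import (
        "fmt"
        "time"
    )

    // sloDuration reproduces the relationship visible in the records
    // above: SLO startup duration = end-to-end startup time minus the
    // image-pull window. An aid for reading the log, not kubelet code.
    func sloDuration(e2e, firstStartedPulling, lastFinishedPulling time.Duration) time.Duration {
        return e2e - (lastFinishedPulling - firstStartedPulling)
    }

    func main() {
        // Monotonic clock readings ("m=+...") from the barbican-operator record.
        e2e := 16289378757 * time.Nanosecond
        first := 917150425901 * time.Nanosecond // firstStartedPulling m=+917.150425901
        last := 930551873779 * time.Nanosecond  // lastFinishedPulling  m=+930.551873779
        fmt.Println(sloDuration(e2e, first, last)) // prints 2.887930879s
    }
]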
Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.360118 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.369451 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq" event={"ID":"3707a523-c522-4172-9844-75e296641307","Type":"ContainerStarted","Data":"cced1055aabd323ae015ad869c0d8a1c5fec6794cbde325543ed66ee11885948"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.369492 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq" event={"ID":"3707a523-c522-4172-9844-75e296641307","Type":"ContainerStarted","Data":"24973b31a39ed1595ec4f1551deffb28e9360a22d772c6e3ea90943bf6801aa1"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.369834 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.380643 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x" event={"ID":"8682f68d-8f1a-40c2-a06f-412cf86e26db","Type":"ContainerStarted","Data":"b0f97ff2c12cc14455768af08c0ddadd2363144aa560ca9c4c160aa824035acf"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.389495 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz" event={"ID":"7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b","Type":"ContainerStarted","Data":"5f5a62fcedd3317459ae109a95b28c8eb2dcf878e802a27bfbc37cf9d7c7ed1c"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.389545 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz" event={"ID":"7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b","Type":"ContainerStarted","Data":"c280015b53621c567f2d94cc0d63c5621cd34f68b639a2eae04454dffad58658"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.390163 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.401032 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk" event={"ID":"4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee","Type":"ContainerStarted","Data":"f6f06ff6136e1285de09b046f271d1b1db8c2dcff9acd1f7ec3421fb2870d40b"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.411209 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d" event={"ID":"d4069512-9cf0-4fd3-839a-4afc857dec61","Type":"ContainerStarted","Data":"1fb9e87d6aef5e0e40ba020f5ab028be613f9ae2c8772c0497e47c063b7767e9"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.411250 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d" event={"ID":"d4069512-9cf0-4fd3-839a-4afc857dec61","Type":"ContainerStarted","Data":"1dd5940943372e09a92bd11a340ebf61ebea173fc1c6a80ef2a0655b87d809b8"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.411848 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.422840 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" event={"ID":"ac78c97f-d06d-4817-aba8-145f7ec5c3ee","Type":"ContainerStarted","Data":"6647883c581c68674278ba3c20bf65bd75774da7052a239fee53b0e7e8b1f55c"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.423490 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.425462 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd" podStartSLOduration=4.215322369 podStartE2EDuration="16.425450369s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.58701412 +0000 UTC m=+918.341285926" lastFinishedPulling="2025-09-30 17:14:43.79714211 +0000 UTC m=+930.551413926" observedRunningTime="2025-09-30 17:14:45.392898733 +0000 UTC m=+932.147170549" watchObservedRunningTime="2025-09-30 17:14:45.425450369 +0000 UTC m=+932.179722185" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.427797 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz" podStartSLOduration=3.320331118 podStartE2EDuration="16.427787632s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:30.66009455 +0000 UTC m=+917.414366366" lastFinishedPulling="2025-09-30 17:14:43.767551064 +0000 UTC m=+930.521822880" observedRunningTime="2025-09-30 17:14:45.423779314 +0000 UTC m=+932.178051120" watchObservedRunningTime="2025-09-30 17:14:45.427787632 +0000 UTC m=+932.182059448" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.427991 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk" event={"ID":"e8be5e01-df2e-4b18-a8bd-9b48b962f487","Type":"ContainerStarted","Data":"75502db924488124058a96a63e5e79399ee12b33b25da18119c9bca7ed98b1e4"} Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.461696 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq" podStartSLOduration=4.605654175 podStartE2EDuration="16.461678544s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.940142715 +0000 UTC m=+918.694414531" lastFinishedPulling="2025-09-30 17:14:43.796167084 +0000 UTC m=+930.550438900" observedRunningTime="2025-09-30 17:14:45.460259676 +0000 UTC m=+932.214531492" watchObservedRunningTime="2025-09-30 17:14:45.461678544 +0000 UTC m=+932.215950360" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.484918 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" podStartSLOduration=4.91565746 podStartE2EDuration="16.48490309s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:32.212423914 +0000 UTC m=+918.966695730" lastFinishedPulling="2025-09-30 17:14:43.781669544 +0000 UTC m=+930.535941360" observedRunningTime="2025-09-30 17:14:45.483643146 +0000 UTC m=+932.237914962" 
watchObservedRunningTime="2025-09-30 17:14:45.48490309 +0000 UTC m=+932.239174906" Sep 30 17:14:45 crc kubenswrapper[4818]: I0930 17:14:45.510560 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d" podStartSLOduration=4.635761805 podStartE2EDuration="16.51054659s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.935402597 +0000 UTC m=+918.689674413" lastFinishedPulling="2025-09-30 17:14:43.810187382 +0000 UTC m=+930.564459198" observedRunningTime="2025-09-30 17:14:45.508451463 +0000 UTC m=+932.262723279" watchObservedRunningTime="2025-09-30 17:14:45.51054659 +0000 UTC m=+932.264818406" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.438357 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt" event={"ID":"ea4afb9c-f223-4571-8d05-a4ed581c8116","Type":"ContainerStarted","Data":"dbb2b3595aabe6e7381b079c8c16ae9ca100b213caa6ea0c981c1fc83ddd8fd5"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.442722 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" event={"ID":"ac78c97f-d06d-4817-aba8-145f7ec5c3ee","Type":"ContainerStarted","Data":"da93cdf6676ab07de0accf5be662e969c36cc076a4d13d26fd5e577a11993f66"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.446267 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9" event={"ID":"bd72fadd-ea9a-43ab-9817-83b6a33b60fb","Type":"ContainerStarted","Data":"7d0376cea61c3c1d78cffa80ff12493a2c5379bc7a66c542fa3ec9e5b5f5f813"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.446876 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.450583 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd" event={"ID":"91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8","Type":"ContainerStarted","Data":"5d17bba50984820b0e1f623a6ad13d3a300af58b5a9e6e6faea06dd159e9557f"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.453446 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x" event={"ID":"8682f68d-8f1a-40c2-a06f-412cf86e26db","Type":"ContainerStarted","Data":"7bc7e268b93448db3d5feb584d985144605a62b9defeedc83d012bc7c51c6aa7"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.453821 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.456286 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24" event={"ID":"211a9bb6-3ab5-47e6-92e4-32eb396dd4dc","Type":"ContainerStarted","Data":"e611a5d29eb2c4f64c20f2e16a3f96a76713c49a8943288aa780eb7c7ed25cb4"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.456702 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.458879 4818 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk" event={"ID":"4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee","Type":"ContainerStarted","Data":"7988b08760eb5b9b918b021b8cf62eb71cc852051a9bbe03cb431f69f27571e9"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.459286 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.470571 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr" event={"ID":"ef92660c-59b4-4bf2-ae84-1873db0c94b2","Type":"ContainerStarted","Data":"8c71cf326a40d9f82025560ff3262b30d745d70c93c6748769ced227425f2256"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.470812 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.472507 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9" podStartSLOduration=5.630844991 podStartE2EDuration="17.472481803s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.985586068 +0000 UTC m=+918.739857884" lastFinishedPulling="2025-09-30 17:14:43.82722288 +0000 UTC m=+930.581494696" observedRunningTime="2025-09-30 17:14:46.468738212 +0000 UTC m=+933.223010028" watchObservedRunningTime="2025-09-30 17:14:46.472481803 +0000 UTC m=+933.226753619" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.474776 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk" event={"ID":"e8be5e01-df2e-4b18-a8bd-9b48b962f487","Type":"ContainerStarted","Data":"396bdd405ecdcb1fd17b65a43ab10e3aaeea137b02df537d89d65418f5b32f24"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.475299 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.477792 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs" event={"ID":"92b0d181-cc90-43f8-a3a4-86a9a65b4c73","Type":"ContainerStarted","Data":"fd034b5032768f2c1ed6dab62926a2f4e3b611c07e13e116d005e91950856db0"} Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.478052 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.496417 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x" podStartSLOduration=5.659334457 podStartE2EDuration="17.496397736s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.95408689 +0000 UTC m=+918.708358706" lastFinishedPulling="2025-09-30 17:14:43.791150169 +0000 UTC m=+930.545421985" observedRunningTime="2025-09-30 17:14:46.487064715 +0000 UTC m=+933.241336541" watchObservedRunningTime="2025-09-30 17:14:46.496397736 +0000 UTC m=+933.250669552" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.507176 4818 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk" podStartSLOduration=5.56132763 podStartE2EDuration="17.507157116s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.926396325 +0000 UTC m=+918.680668141" lastFinishedPulling="2025-09-30 17:14:43.872225801 +0000 UTC m=+930.626497627" observedRunningTime="2025-09-30 17:14:46.503829896 +0000 UTC m=+933.258101712" watchObservedRunningTime="2025-09-30 17:14:46.507157116 +0000 UTC m=+933.261428932" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.521627 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24" podStartSLOduration=5.6784181910000004 podStartE2EDuration="17.521606245s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.966389361 +0000 UTC m=+918.720661177" lastFinishedPulling="2025-09-30 17:14:43.809577415 +0000 UTC m=+930.563849231" observedRunningTime="2025-09-30 17:14:46.52031868 +0000 UTC m=+933.274590516" watchObservedRunningTime="2025-09-30 17:14:46.521606245 +0000 UTC m=+933.275878071" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.536789 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk" podStartSLOduration=5.387166842 podStartE2EDuration="17.536769563s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.656898761 +0000 UTC m=+918.411170577" lastFinishedPulling="2025-09-30 17:14:43.806501482 +0000 UTC m=+930.560773298" observedRunningTime="2025-09-30 17:14:46.535208861 +0000 UTC m=+933.289480677" watchObservedRunningTime="2025-09-30 17:14:46.536769563 +0000 UTC m=+933.291041389" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.562863 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr" podStartSLOduration=5.322388748 podStartE2EDuration="17.562845085s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.569223371 +0000 UTC m=+918.323495177" lastFinishedPulling="2025-09-30 17:14:43.809679698 +0000 UTC m=+930.563951514" observedRunningTime="2025-09-30 17:14:46.562091795 +0000 UTC m=+933.316363631" watchObservedRunningTime="2025-09-30 17:14:46.562845085 +0000 UTC m=+933.317116901" Sep 30 17:14:46 crc kubenswrapper[4818]: I0930 17:14:46.580414 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs" podStartSLOduration=5.739427873 podStartE2EDuration="17.580394567s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.966687199 +0000 UTC m=+918.720959015" lastFinishedPulling="2025-09-30 17:14:43.807653893 +0000 UTC m=+930.561925709" observedRunningTime="2025-09-30 17:14:46.577200481 +0000 UTC m=+933.331472297" watchObservedRunningTime="2025-09-30 17:14:46.580394567 +0000 UTC m=+933.334666383" Sep 30 17:14:47 crc kubenswrapper[4818]: I0930 17:14:47.511038 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" event={"ID":"ec885f64-c0b7-4541-826e-2405c7a8c4e6","Type":"ContainerStarted","Data":"22c6945436b06a409ba2ed1c66cbc6e9e638125575e1a9611bafdf4104d00d3e"} Sep 30 17:14:47 crc kubenswrapper[4818]: 
I0930 17:14:47.512152 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" Sep 30 17:14:47 crc kubenswrapper[4818]: I0930 17:14:47.527756 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" podStartSLOduration=3.551662635 podStartE2EDuration="18.527739147s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.995838634 +0000 UTC m=+918.750110450" lastFinishedPulling="2025-09-30 17:14:46.971915146 +0000 UTC m=+933.726186962" observedRunningTime="2025-09-30 17:14:47.527131701 +0000 UTC m=+934.281403537" watchObservedRunningTime="2025-09-30 17:14:47.527739147 +0000 UTC m=+934.282010963" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.532299 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" event={"ID":"609a391b-24d6-41f8-ad04-2c0a6e35de6b","Type":"ContainerStarted","Data":"83fbe6f02ecc0d91b2c992358dbb7e1f04a19cac54068cb50ac995b36c9c2051"} Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.532747 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.539296 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" event={"ID":"02f33697-4faf-4e8d-8d78-77a6e8ad7d72","Type":"ContainerStarted","Data":"1eddc5539d152358e1a8a664e0422c3aafe505e64d5de64050f7706c2fba7050"} Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.539634 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.544637 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" event={"ID":"43743bf4-0c17-4266-b31d-a17cf0e6d330","Type":"ContainerStarted","Data":"aef90afeec7080a7c0d42ed5872749a9f98a760e35efd5c05448ea9050c9cfe1"} Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.544861 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.553350 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" podStartSLOduration=3.311180611 podStartE2EDuration="20.5533339s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:31.96896599 +0000 UTC m=+918.723237806" lastFinishedPulling="2025-09-30 17:14:49.211119279 +0000 UTC m=+935.965391095" observedRunningTime="2025-09-30 17:14:49.547893534 +0000 UTC m=+936.302165360" watchObservedRunningTime="2025-09-30 17:14:49.5533339 +0000 UTC m=+936.307605716" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.557903 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-q4g6w" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.569745 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" podStartSLOduration=3.6107532449999997 podStartE2EDuration="20.569723231s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:32.222905286 +0000 UTC m=+918.977177092" lastFinishedPulling="2025-09-30 17:14:49.181875262 +0000 UTC m=+935.936147078" observedRunningTime="2025-09-30 17:14:49.564008377 +0000 UTC m=+936.318280193" watchObservedRunningTime="2025-09-30 17:14:49.569723231 +0000 UTC m=+936.323995047" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.586007 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" podStartSLOduration=3.678526359 podStartE2EDuration="20.585989849s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:32.272229883 +0000 UTC m=+919.026501699" lastFinishedPulling="2025-09-30 17:14:49.179693373 +0000 UTC m=+935.933965189" observedRunningTime="2025-09-30 17:14:49.579724051 +0000 UTC m=+936.333995867" watchObservedRunningTime="2025-09-30 17:14:49.585989849 +0000 UTC m=+936.340261665" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.618695 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-tkdbz" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.656022 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-r6vlr" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.750004 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-hgrdk" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.765618 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-8jqzd" Sep 30 17:14:49 crc kubenswrapper[4818]: I0930 17:14:49.874263 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-45xpz" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.030232 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-7d857cc749-d4c9x" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.069180 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-bk24d" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.091279 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-52hvk" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.097442 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-shjzt" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.150810 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-88c7-2wcsq" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.155733 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-64d7b59854-v6kc9" Sep 30 
17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.197651 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-7kj24" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.407438 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-wn6hs" Sep 30 17:14:50 crc kubenswrapper[4818]: I0930 17:14:50.471034 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-vn27d" Sep 30 17:14:52 crc kubenswrapper[4818]: I0930 17:14:52.596077 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:14:52 crc kubenswrapper[4818]: I0930 17:14:52.596538 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:14:52 crc kubenswrapper[4818]: I0930 17:14:52.596675 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 17:14:52 crc kubenswrapper[4818]: I0930 17:14:52.597398 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bef732d2824af20a982f56bcc38b49ae15a3f1c74de5e344956d8799c207e863"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 30 17:14:52 crc kubenswrapper[4818]: I0930 17:14:52.597452 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://bef732d2824af20a982f56bcc38b49ae15a3f1c74de5e344956d8799c207e863" gracePeriod=600 Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.583433 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="bef732d2824af20a982f56bcc38b49ae15a3f1c74de5e344956d8799c207e863" exitCode=0 Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.583507 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"bef732d2824af20a982f56bcc38b49ae15a3f1c74de5e344956d8799c207e863"} Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.583852 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"8f6686d61e096db5e2902b7d245395d3a5ea7e0fa983b9dcf9c5710b1f2ecad9"} Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.583879 4818 scope.go:117] "RemoveContainer" containerID="f2e60af7181a017f3a998586cfb2fbfcd7d49b22c87395265f8c90eee19ee429" Sep 30 17:14:53 crc 
kubenswrapper[4818]: I0930 17:14:53.593255 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" event={"ID":"d11ea82b-7b70-49df-a288-673cb6ee9e9a","Type":"ContainerStarted","Data":"1fc5399e0129734b69ff505cb7576cb8d97b40b27f3a54330f688b7e22bf22af"} Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.593499 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.597840 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" event={"ID":"d94046f7-ca49-45f5-970e-e85346bbc77b","Type":"ContainerStarted","Data":"f9397ff6aea6b8b0d54e612fd0fc6f1de64554d939dff3810f4fed2e0b8e29c8"} Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.598138 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.599632 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" event={"ID":"347237b8-ee53-432a-932b-d7e2488b253f","Type":"ContainerStarted","Data":"1758e8b927a7a93e677116ea811a798610ff0b0aa431f3ae39769c8e132e9b2b"} Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.636740 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" podStartSLOduration=3.797901203 podStartE2EDuration="24.636722143s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:32.317858582 +0000 UTC m=+919.072130398" lastFinishedPulling="2025-09-30 17:14:53.156679502 +0000 UTC m=+939.910951338" observedRunningTime="2025-09-30 17:14:53.632471559 +0000 UTC m=+940.386743405" watchObservedRunningTime="2025-09-30 17:14:53.636722143 +0000 UTC m=+940.390993969" Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.674226 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" podStartSLOduration=3.754941177 podStartE2EDuration="24.674205482s" podCreationTimestamp="2025-09-30 17:14:29 +0000 UTC" firstStartedPulling="2025-09-30 17:14:32.22156489 +0000 UTC m=+918.975836706" lastFinishedPulling="2025-09-30 17:14:53.140829195 +0000 UTC m=+939.895101011" observedRunningTime="2025-09-30 17:14:53.65145553 +0000 UTC m=+940.405727356" watchObservedRunningTime="2025-09-30 17:14:53.674205482 +0000 UTC m=+940.428477308" Sep 30 17:14:53 crc kubenswrapper[4818]: I0930 17:14:53.679262 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-dq42t" podStartSLOduration=2.7502304300000002 podStartE2EDuration="23.679244918s" podCreationTimestamp="2025-09-30 17:14:30 +0000 UTC" firstStartedPulling="2025-09-30 17:14:32.226340888 +0000 UTC m=+918.980612704" lastFinishedPulling="2025-09-30 17:14:53.155355366 +0000 UTC m=+939.909627192" observedRunningTime="2025-09-30 17:14:53.6737456 +0000 UTC m=+940.428017436" watchObservedRunningTime="2025-09-30 17:14:53.679244918 +0000 UTC m=+940.433516744" Sep 30 17:14:59 crc kubenswrapper[4818]: I0930 17:14:59.975586 4818 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-7975b88857-6gvjh" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.139766 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj"] Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.140798 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.146756 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.146809 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.153169 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj"] Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.217159 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-lnnkn" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.281587 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95nsz\" (UniqueName: \"kubernetes.io/projected/b0d6a84b-e8e6-4784-9693-44d7caa785c9-kube-api-access-95nsz\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.281668 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6a84b-e8e6-4784-9693-44d7caa785c9-secret-volume\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.281909 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6a84b-e8e6-4784-9693-44d7caa785c9-config-volume\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.383485 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6a84b-e8e6-4784-9693-44d7caa785c9-config-volume\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.383706 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95nsz\" (UniqueName: \"kubernetes.io/projected/b0d6a84b-e8e6-4784-9693-44d7caa785c9-kube-api-access-95nsz\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: 
I0930 17:15:00.383789 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6a84b-e8e6-4784-9693-44d7caa785c9-secret-volume\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.384536 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6a84b-e8e6-4784-9693-44d7caa785c9-config-volume\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.392179 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6a84b-e8e6-4784-9693-44d7caa785c9-secret-volume\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.410705 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95nsz\" (UniqueName: \"kubernetes.io/projected/b0d6a84b-e8e6-4784-9693-44d7caa785c9-kube-api-access-95nsz\") pod \"collect-profiles-29320875-sw7nj\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.434132 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-f66b554c6-lcj9j" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.462440 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-jl7hb" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.465797 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.569899 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:15:00 crc kubenswrapper[4818]: I0930 17:15:00.926332 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj"] Sep 30 17:15:00 crc kubenswrapper[4818]: W0930 17:15:00.933113 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0d6a84b_e8e6_4784_9693_44d7caa785c9.slice/crio-b35c9c82d9b1867503094bff50e81ec7929a9a15ddd2eda4980ab2a28f9d6f29 WatchSource:0}: Error finding container b35c9c82d9b1867503094bff50e81ec7929a9a15ddd2eda4980ab2a28f9d6f29: Status 404 returned error can't find the container with id b35c9c82d9b1867503094bff50e81ec7929a9a15ddd2eda4980ab2a28f9d6f29 Sep 30 17:15:01 crc kubenswrapper[4818]: I0930 17:15:01.685827 4818 generic.go:334] "Generic (PLEG): container finished" podID="b0d6a84b-e8e6-4784-9693-44d7caa785c9" containerID="3f80aab8fafc8f7f132daf703613ee9266768c4d09d49540725f7da5d04cadfd" exitCode=0 Sep 30 17:15:01 crc kubenswrapper[4818]: I0930 17:15:01.685915 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" event={"ID":"b0d6a84b-e8e6-4784-9693-44d7caa785c9","Type":"ContainerDied","Data":"3f80aab8fafc8f7f132daf703613ee9266768c4d09d49540725f7da5d04cadfd"} Sep 30 17:15:01 crc kubenswrapper[4818]: I0930 17:15:01.686095 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" event={"ID":"b0d6a84b-e8e6-4784-9693-44d7caa785c9","Type":"ContainerStarted","Data":"b35c9c82d9b1867503094bff50e81ec7929a9a15ddd2eda4980ab2a28f9d6f29"} Sep 30 17:15:01 crc kubenswrapper[4818]: I0930 17:15:01.745467 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-2x6qj" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.085866 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.246130 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6a84b-e8e6-4784-9693-44d7caa785c9-secret-volume\") pod \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.246164 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6a84b-e8e6-4784-9693-44d7caa785c9-config-volume\") pod \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.246240 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95nsz\" (UniqueName: \"kubernetes.io/projected/b0d6a84b-e8e6-4784-9693-44d7caa785c9-kube-api-access-95nsz\") pod \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\" (UID: \"b0d6a84b-e8e6-4784-9693-44d7caa785c9\") " Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.247022 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0d6a84b-e8e6-4784-9693-44d7caa785c9-config-volume" (OuterVolumeSpecName: "config-volume") pod "b0d6a84b-e8e6-4784-9693-44d7caa785c9" (UID: "b0d6a84b-e8e6-4784-9693-44d7caa785c9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.251200 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0d6a84b-e8e6-4784-9693-44d7caa785c9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b0d6a84b-e8e6-4784-9693-44d7caa785c9" (UID: "b0d6a84b-e8e6-4784-9693-44d7caa785c9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.251764 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0d6a84b-e8e6-4784-9693-44d7caa785c9-kube-api-access-95nsz" (OuterVolumeSpecName: "kube-api-access-95nsz") pod "b0d6a84b-e8e6-4784-9693-44d7caa785c9" (UID: "b0d6a84b-e8e6-4784-9693-44d7caa785c9"). InnerVolumeSpecName "kube-api-access-95nsz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.348185 4818 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6a84b-e8e6-4784-9693-44d7caa785c9-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.348227 4818 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6a84b-e8e6-4784-9693-44d7caa785c9-config-volume\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.348240 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95nsz\" (UniqueName: \"kubernetes.io/projected/b0d6a84b-e8e6-4784-9693-44d7caa785c9-kube-api-access-95nsz\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.701762 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" event={"ID":"b0d6a84b-e8e6-4784-9693-44d7caa785c9","Type":"ContainerDied","Data":"b35c9c82d9b1867503094bff50e81ec7929a9a15ddd2eda4980ab2a28f9d6f29"} Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.701831 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b35c9c82d9b1867503094bff50e81ec7929a9a15ddd2eda4980ab2a28f9d6f29" Sep 30 17:15:03 crc kubenswrapper[4818]: I0930 17:15:03.701804 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320875-sw7nj" Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.115592 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc"] Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.116266 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="kube-rbac-proxy" containerID="cri-o://db15922ac3aecf3d6a56164794eed95b70b698b9ae738d71f27450bde7c9b7c1" gracePeriod=10 Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.116337 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="manager" containerID="cri-o://f9397ff6aea6b8b0d54e612fd0fc6f1de64554d939dff3810f4fed2e0b8e29c8" gracePeriod=10 Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.736308 4818 generic.go:334] "Generic (PLEG): container finished" podID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerID="f9397ff6aea6b8b0d54e612fd0fc6f1de64554d939dff3810f4fed2e0b8e29c8" exitCode=0 Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.736547 4818 generic.go:334] "Generic (PLEG): container finished" podID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerID="db15922ac3aecf3d6a56164794eed95b70b698b9ae738d71f27450bde7c9b7c1" exitCode=0 Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.736379 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" event={"ID":"d94046f7-ca49-45f5-970e-e85346bbc77b","Type":"ContainerDied","Data":"f9397ff6aea6b8b0d54e612fd0fc6f1de64554d939dff3810f4fed2e0b8e29c8"} Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.736584 4818 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" event={"ID":"d94046f7-ca49-45f5-970e-e85346bbc77b","Type":"ContainerDied","Data":"db15922ac3aecf3d6a56164794eed95b70b698b9ae738d71f27450bde7c9b7c1"} Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.908860 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"] Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.909092 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="operator" containerID="cri-o://970bd29c0824fecfae74119ad9caa7a608bc3e708308267cf57d3b937407f30f" gracePeriod=10 Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.909155 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="kube-rbac-proxy" containerID="cri-o://3dd92a505ca99f530071d674256910242a81181ea0ac9ff04118e983d6d15fa1" gracePeriod=10 Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.958341 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j"] Sep 30 17:15:07 crc kubenswrapper[4818]: E0930 17:15:07.958652 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0d6a84b-e8e6-4784-9693-44d7caa785c9" containerName="collect-profiles" Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.958668 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0d6a84b-e8e6-4784-9693-44d7caa785c9" containerName="collect-profiles" Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.958816 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0d6a84b-e8e6-4784-9693-44d7caa785c9" containerName="collect-profiles" Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.959517 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:07 crc kubenswrapper[4818]: I0930 17:15:07.977176 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j"] Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.024783 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb8hr\" (UniqueName: \"kubernetes.io/projected/7e3342cd-7b9f-46d2-a977-25659bf9dd3a-kube-api-access-nb8hr\") pod \"watcher-operator-controller-manager-ff74f75dc-55p9j\" (UID: \"7e3342cd-7b9f-46d2-a977-25659bf9dd3a\") " pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.125903 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb8hr\" (UniqueName: \"kubernetes.io/projected/7e3342cd-7b9f-46d2-a977-25659bf9dd3a-kube-api-access-nb8hr\") pod \"watcher-operator-controller-manager-ff74f75dc-55p9j\" (UID: \"7e3342cd-7b9f-46d2-a977-25659bf9dd3a\") " pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.143467 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb8hr\" (UniqueName: \"kubernetes.io/projected/7e3342cd-7b9f-46d2-a977-25659bf9dd3a-kube-api-access-nb8hr\") pod \"watcher-operator-controller-manager-ff74f75dc-55p9j\" (UID: \"7e3342cd-7b9f-46d2-a977-25659bf9dd3a\") " pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.366013 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.745352 4818 generic.go:334] "Generic (PLEG): container finished" podID="2c483975-9057-48f9-a5fb-54e905171d02" containerID="3dd92a505ca99f530071d674256910242a81181ea0ac9ff04118e983d6d15fa1" exitCode=0 Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.745668 4818 generic.go:334] "Generic (PLEG): container finished" podID="2c483975-9057-48f9-a5fb-54e905171d02" containerID="970bd29c0824fecfae74119ad9caa7a608bc3e708308267cf57d3b937407f30f" exitCode=0 Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.745435 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" event={"ID":"2c483975-9057-48f9-a5fb-54e905171d02","Type":"ContainerDied","Data":"3dd92a505ca99f530071d674256910242a81181ea0ac9ff04118e983d6d15fa1"} Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.745703 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" event={"ID":"2c483975-9057-48f9-a5fb-54e905171d02","Type":"ContainerDied","Data":"970bd29c0824fecfae74119ad9caa7a608bc3e708308267cf57d3b937407f30f"} Sep 30 17:15:08 crc kubenswrapper[4818]: I0930 17:15:08.823165 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j"] Sep 30 17:15:09 crc kubenswrapper[4818]: I0930 17:15:09.763839 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" event={"ID":"7e3342cd-7b9f-46d2-a977-25659bf9dd3a","Type":"ContainerStarted","Data":"715a96cb1b3919b6b306992877186813035111b1547d5e2315f72d1e7fbcd1fb"} Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.248395 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-index-cb8dm"] Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.249471 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.252158 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-index-dockercfg-jxt9q" Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.261624 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-index-cb8dm"] Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.357193 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd2v4\" (UniqueName: \"kubernetes.io/projected/12b6c7ef-2f34-4885-9ed0-0522c8057303-kube-api-access-rd2v4\") pod \"watcher-operator-index-cb8dm\" (UID: \"12b6c7ef-2f34-4885-9ed0-0522c8057303\") " pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.459700 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd2v4\" (UniqueName: \"kubernetes.io/projected/12b6c7ef-2f34-4885-9ed0-0522c8057303-kube-api-access-rd2v4\") pod \"watcher-operator-index-cb8dm\" (UID: \"12b6c7ef-2f34-4885-9ed0-0522c8057303\") " pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.492832 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd2v4\" (UniqueName: \"kubernetes.io/projected/12b6c7ef-2f34-4885-9ed0-0522c8057303-kube-api-access-rd2v4\") pod \"watcher-operator-index-cb8dm\" (UID: \"12b6c7ef-2f34-4885-9ed0-0522c8057303\") " pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.560259 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": dial tcp 10.217.0.100:8081: connect: connection refused" Sep 30 17:15:10 crc kubenswrapper[4818]: I0930 17:15:10.585077 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:11 crc kubenswrapper[4818]: I0930 17:15:11.141203 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-index-cb8dm"] Sep 30 17:15:11 crc kubenswrapper[4818]: I0930 17:15:11.786289 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-index-cb8dm" event={"ID":"12b6c7ef-2f34-4885-9ed0-0522c8057303","Type":"ContainerStarted","Data":"d62a8d93a1257c19a69ec3f4d9b1e0eaab2c78e25c257ffc90a70d07a6f63c84"} Sep 30 17:15:12 crc kubenswrapper[4818]: I0930 17:15:12.177792 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": dial tcp 10.217.0.78:8081: connect: connection refused" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.452787 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.513623 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6t5b\" (UniqueName: \"kubernetes.io/projected/d94046f7-ca49-45f5-970e-e85346bbc77b-kube-api-access-t6t5b\") pod \"d94046f7-ca49-45f5-970e-e85346bbc77b\" (UID: \"d94046f7-ca49-45f5-970e-e85346bbc77b\") " Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.520607 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d94046f7-ca49-45f5-970e-e85346bbc77b-kube-api-access-t6t5b" (OuterVolumeSpecName: "kube-api-access-t6t5b") pod "d94046f7-ca49-45f5-970e-e85346bbc77b" (UID: "d94046f7-ca49-45f5-970e-e85346bbc77b"). InnerVolumeSpecName "kube-api-access-t6t5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.615825 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6t5b\" (UniqueName: \"kubernetes.io/projected/d94046f7-ca49-45f5-970e-e85346bbc77b-kube-api-access-t6t5b\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.632558 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.717460 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tw6pj\" (UniqueName: \"kubernetes.io/projected/2c483975-9057-48f9-a5fb-54e905171d02-kube-api-access-tw6pj\") pod \"2c483975-9057-48f9-a5fb-54e905171d02\" (UID: \"2c483975-9057-48f9-a5fb-54e905171d02\") " Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.720514 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c483975-9057-48f9-a5fb-54e905171d02-kube-api-access-tw6pj" (OuterVolumeSpecName: "kube-api-access-tw6pj") pod "2c483975-9057-48f9-a5fb-54e905171d02" (UID: "2c483975-9057-48f9-a5fb-54e905171d02"). InnerVolumeSpecName "kube-api-access-tw6pj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.805109 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.805126 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc" event={"ID":"d94046f7-ca49-45f5-970e-e85346bbc77b","Type":"ContainerDied","Data":"d16c77cd1365891bd6deeadecb41502a193a27570d2f903a5893978efbebd888"} Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.805189 4818 scope.go:117] "RemoveContainer" containerID="f9397ff6aea6b8b0d54e612fd0fc6f1de64554d939dff3810f4fed2e0b8e29c8" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.807568 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" event={"ID":"2c483975-9057-48f9-a5fb-54e905171d02","Type":"ContainerDied","Data":"b5d5a027c5bfde32200fc1c634a9c8f4a75a5e7701e7a9d571d0d7630784973d"} Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.807650 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.818965 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tw6pj\" (UniqueName: \"kubernetes.io/projected/2c483975-9057-48f9-a5fb-54e905171d02-kube-api-access-tw6pj\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.826907 4818 scope.go:117] "RemoveContainer" containerID="db15922ac3aecf3d6a56164794eed95b70b698b9ae738d71f27450bde7c9b7c1" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.841816 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc"] Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.858215 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-kjsvc"] Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.882966 4818 scope.go:117] "RemoveContainer" containerID="3dd92a505ca99f530071d674256910242a81181ea0ac9ff04118e983d6d15fa1" Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.887637 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"] Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.892388 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-dd685c6cd-b2nqh"] Sep 30 17:15:13 crc kubenswrapper[4818]: I0930 17:15:13.903545 4818 scope.go:117] "RemoveContainer" containerID="970bd29c0824fecfae74119ad9caa7a608bc3e708308267cf57d3b937407f30f" Sep 30 17:15:14 crc kubenswrapper[4818]: I0930 17:15:14.033481 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c483975-9057-48f9-a5fb-54e905171d02" path="/var/lib/kubelet/pods/2c483975-9057-48f9-a5fb-54e905171d02/volumes" Sep 30 17:15:14 crc kubenswrapper[4818]: I0930 17:15:14.034322 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" path="/var/lib/kubelet/pods/d94046f7-ca49-45f5-970e-e85346bbc77b/volumes" Sep 30 17:15:14 crc kubenswrapper[4818]: I0930 17:15:14.817535 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" event={"ID":"7e3342cd-7b9f-46d2-a977-25659bf9dd3a","Type":"ContainerStarted","Data":"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9"} Sep 30 17:15:15 crc kubenswrapper[4818]: I0930 17:15:15.832493 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" event={"ID":"7e3342cd-7b9f-46d2-a977-25659bf9dd3a","Type":"ContainerStarted","Data":"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04"} Sep 30 17:15:16 crc kubenswrapper[4818]: I0930 17:15:16.846031 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-index-cb8dm" event={"ID":"12b6c7ef-2f34-4885-9ed0-0522c8057303","Type":"ContainerStarted","Data":"e9fd21b3648debf719fc5a1f95704a20f3d3f01ae396852878a29a6224ce0695"} Sep 30 17:15:16 crc kubenswrapper[4818]: I0930 17:15:16.881161 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-index-cb8dm" podStartSLOduration=1.737238297 podStartE2EDuration="6.881133626s" podCreationTimestamp="2025-09-30 17:15:10 +0000 UTC" 
firstStartedPulling="2025-09-30 17:15:11.164150001 +0000 UTC m=+957.918421817" lastFinishedPulling="2025-09-30 17:15:16.30804533 +0000 UTC m=+963.062317146" observedRunningTime="2025-09-30 17:15:16.879386489 +0000 UTC m=+963.633658315" watchObservedRunningTime="2025-09-30 17:15:16.881133626 +0000 UTC m=+963.635405472" Sep 30 17:15:16 crc kubenswrapper[4818]: I0930 17:15:16.913641 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" podStartSLOduration=9.913611591 podStartE2EDuration="9.913611591s" podCreationTimestamp="2025-09-30 17:15:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:15:16.902589484 +0000 UTC m=+963.656861330" watchObservedRunningTime="2025-09-30 17:15:16.913611591 +0000 UTC m=+963.667883417" Sep 30 17:15:18 crc kubenswrapper[4818]: I0930 17:15:18.366952 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:20 crc kubenswrapper[4818]: I0930 17:15:20.585691 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:20 crc kubenswrapper[4818]: I0930 17:15:20.586090 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:20 crc kubenswrapper[4818]: I0930 17:15:20.629440 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:28 crc kubenswrapper[4818]: I0930 17:15:28.373834 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:30 crc kubenswrapper[4818]: I0930 17:15:30.628382 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-index-cb8dm" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.905157 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt"] Sep 30 17:15:38 crc kubenswrapper[4818]: E0930 17:15:38.906112 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="kube-rbac-proxy" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906128 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="kube-rbac-proxy" Sep 30 17:15:38 crc kubenswrapper[4818]: E0930 17:15:38.906144 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="manager" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906151 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="manager" Sep 30 17:15:38 crc kubenswrapper[4818]: E0930 17:15:38.906167 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="kube-rbac-proxy" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906175 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="kube-rbac-proxy" Sep 30 17:15:38 crc kubenswrapper[4818]: E0930 17:15:38.906200 
4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="operator" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906207 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="operator" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906369 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="kube-rbac-proxy" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906391 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c483975-9057-48f9-a5fb-54e905171d02" containerName="operator" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906401 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="kube-rbac-proxy" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.906418 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d94046f7-ca49-45f5-970e-e85346bbc77b" containerName="manager" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.907614 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.909707 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-fsmvv" Sep 30 17:15:38 crc kubenswrapper[4818]: I0930 17:15:38.919530 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt"] Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.062381 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-bundle\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.062525 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-util\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.062639 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj7hm\" (UniqueName: \"kubernetes.io/projected/c184175d-d18e-45cb-a79d-d27dfa315d2b-kube-api-access-wj7hm\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.176678 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-bundle\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " 
pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.176816 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-util\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.176903 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj7hm\" (UniqueName: \"kubernetes.io/projected/c184175d-d18e-45cb-a79d-d27dfa315d2b-kube-api-access-wj7hm\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.177524 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-util\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.178191 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-bundle\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.207086 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj7hm\" (UniqueName: \"kubernetes.io/projected/c184175d-d18e-45cb-a79d-d27dfa315d2b-kube-api-access-wj7hm\") pod \"3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.225501 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:39 crc kubenswrapper[4818]: I0930 17:15:39.656367 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt"] Sep 30 17:15:39 crc kubenswrapper[4818]: W0930 17:15:39.672114 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc184175d_d18e_45cb_a79d_d27dfa315d2b.slice/crio-312a7a76ad16c5843eb4fe88900e2bb8db1de727845ad24b2143b8e4d57195fe WatchSource:0}: Error finding container 312a7a76ad16c5843eb4fe88900e2bb8db1de727845ad24b2143b8e4d57195fe: Status 404 returned error can't find the container with id 312a7a76ad16c5843eb4fe88900e2bb8db1de727845ad24b2143b8e4d57195fe Sep 30 17:15:40 crc kubenswrapper[4818]: I0930 17:15:40.101230 4818 generic.go:334] "Generic (PLEG): container finished" podID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerID="34b2c0ee28f0dd8fc1e59294f572e7cafe7555b299dce6afb29bf9dabece4aa0" exitCode=0 Sep 30 17:15:40 crc kubenswrapper[4818]: I0930 17:15:40.101294 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" event={"ID":"c184175d-d18e-45cb-a79d-d27dfa315d2b","Type":"ContainerDied","Data":"34b2c0ee28f0dd8fc1e59294f572e7cafe7555b299dce6afb29bf9dabece4aa0"} Sep 30 17:15:40 crc kubenswrapper[4818]: I0930 17:15:40.101330 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" event={"ID":"c184175d-d18e-45cb-a79d-d27dfa315d2b","Type":"ContainerStarted","Data":"312a7a76ad16c5843eb4fe88900e2bb8db1de727845ad24b2143b8e4d57195fe"} Sep 30 17:15:41 crc kubenswrapper[4818]: I0930 17:15:41.112044 4818 generic.go:334] "Generic (PLEG): container finished" podID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerID="d822226a6461a5be8349b129e9b2a4e9239f03631386e5c721844d268a0cffe7" exitCode=0 Sep 30 17:15:41 crc kubenswrapper[4818]: I0930 17:15:41.112454 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" event={"ID":"c184175d-d18e-45cb-a79d-d27dfa315d2b","Type":"ContainerDied","Data":"d822226a6461a5be8349b129e9b2a4e9239f03631386e5c721844d268a0cffe7"} Sep 30 17:15:42 crc kubenswrapper[4818]: I0930 17:15:42.122971 4818 generic.go:334] "Generic (PLEG): container finished" podID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerID="2ea1482da1e091573f33a911dab6056960c51dd4b77856bab8620b9a3cc41a82" exitCode=0 Sep 30 17:15:42 crc kubenswrapper[4818]: I0930 17:15:42.123048 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" event={"ID":"c184175d-d18e-45cb-a79d-d27dfa315d2b","Type":"ContainerDied","Data":"2ea1482da1e091573f33a911dab6056960c51dd4b77856bab8620b9a3cc41a82"} Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.504065 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.643854 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-util\") pod \"c184175d-d18e-45cb-a79d-d27dfa315d2b\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.644285 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-bundle\") pod \"c184175d-d18e-45cb-a79d-d27dfa315d2b\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.644344 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wj7hm\" (UniqueName: \"kubernetes.io/projected/c184175d-d18e-45cb-a79d-d27dfa315d2b-kube-api-access-wj7hm\") pod \"c184175d-d18e-45cb-a79d-d27dfa315d2b\" (UID: \"c184175d-d18e-45cb-a79d-d27dfa315d2b\") " Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.645732 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-bundle" (OuterVolumeSpecName: "bundle") pod "c184175d-d18e-45cb-a79d-d27dfa315d2b" (UID: "c184175d-d18e-45cb-a79d-d27dfa315d2b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.649894 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c184175d-d18e-45cb-a79d-d27dfa315d2b-kube-api-access-wj7hm" (OuterVolumeSpecName: "kube-api-access-wj7hm") pod "c184175d-d18e-45cb-a79d-d27dfa315d2b" (UID: "c184175d-d18e-45cb-a79d-d27dfa315d2b"). InnerVolumeSpecName "kube-api-access-wj7hm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.659597 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-util" (OuterVolumeSpecName: "util") pod "c184175d-d18e-45cb-a79d-d27dfa315d2b" (UID: "c184175d-d18e-45cb-a79d-d27dfa315d2b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.745699 4818 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-util\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.745732 4818 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c184175d-d18e-45cb-a79d-d27dfa315d2b-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:43 crc kubenswrapper[4818]: I0930 17:15:43.745744 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wj7hm\" (UniqueName: \"kubernetes.io/projected/c184175d-d18e-45cb-a79d-d27dfa315d2b-kube-api-access-wj7hm\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:44 crc kubenswrapper[4818]: I0930 17:15:44.143243 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" event={"ID":"c184175d-d18e-45cb-a79d-d27dfa315d2b","Type":"ContainerDied","Data":"312a7a76ad16c5843eb4fe88900e2bb8db1de727845ad24b2143b8e4d57195fe"} Sep 30 17:15:44 crc kubenswrapper[4818]: I0930 17:15:44.143282 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt" Sep 30 17:15:44 crc kubenswrapper[4818]: I0930 17:15:44.143287 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="312a7a76ad16c5843eb4fe88900e2bb8db1de727845ad24b2143b8e4d57195fe" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.221542 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz"] Sep 30 17:15:47 crc kubenswrapper[4818]: E0930 17:15:47.222230 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerName="extract" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.222244 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerName="extract" Sep 30 17:15:47 crc kubenswrapper[4818]: E0930 17:15:47.222261 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerName="util" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.222268 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerName="util" Sep 30 17:15:47 crc kubenswrapper[4818]: E0930 17:15:47.222276 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerName="pull" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.222281 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerName="pull" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.222411 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c184175d-d18e-45cb-a79d-d27dfa315d2b" containerName="extract" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.223281 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.226188 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-service-cert" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.240237 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz"] Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.394798 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-apiservice-cert\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.395240 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rcjg\" (UniqueName: \"kubernetes.io/projected/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-kube-api-access-9rcjg\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.395429 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-webhook-cert\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.496887 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rcjg\" (UniqueName: \"kubernetes.io/projected/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-kube-api-access-9rcjg\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.497060 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-webhook-cert\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.497142 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-apiservice-cert\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.504413 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-webhook-cert\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: 
\"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.507698 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-apiservice-cert\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.517791 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rcjg\" (UniqueName: \"kubernetes.io/projected/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-kube-api-access-9rcjg\") pod \"watcher-operator-controller-manager-f4484ff7b-bgtxz\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.543551 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:47 crc kubenswrapper[4818]: I0930 17:15:47.997016 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz"] Sep 30 17:15:48 crc kubenswrapper[4818]: I0930 17:15:48.185210 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" event={"ID":"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c","Type":"ContainerStarted","Data":"dafe5551f24e5726d41ffdb01726f8ece787bd4c67b9faf653aaddf358756152"} Sep 30 17:15:48 crc kubenswrapper[4818]: I0930 17:15:48.185253 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" event={"ID":"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c","Type":"ContainerStarted","Data":"c2bcd78aaad185b8a52bc81e2b7f8c9f44445ac1928f3114c61272134b50dc0c"} Sep 30 17:15:49 crc kubenswrapper[4818]: I0930 17:15:49.193637 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" event={"ID":"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c","Type":"ContainerStarted","Data":"3b2ede703c8cd6df74cd675c53c7cd9abe7e0b9ddf929186590318d5a10ea5bb"} Sep 30 17:15:49 crc kubenswrapper[4818]: I0930 17:15:49.194009 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:49 crc kubenswrapper[4818]: I0930 17:15:49.215138 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" podStartSLOduration=2.215115661 podStartE2EDuration="2.215115661s" podCreationTimestamp="2025-09-30 17:15:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:15:49.209085758 +0000 UTC m=+995.963357584" watchObservedRunningTime="2025-09-30 17:15:49.215115661 +0000 UTC m=+995.969387487" Sep 30 17:15:57 crc kubenswrapper[4818]: I0930 17:15:57.553916 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:15:57 crc kubenswrapper[4818]: I0930 
17:15:57.667104 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j"] Sep 30 17:15:57 crc kubenswrapper[4818]: I0930 17:15:57.667373 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="manager" containerID="cri-o://39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9" gracePeriod=10 Sep 30 17:15:57 crc kubenswrapper[4818]: I0930 17:15:57.667447 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="kube-rbac-proxy" containerID="cri-o://86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04" gracePeriod=10 Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.040267 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.145342 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb8hr\" (UniqueName: \"kubernetes.io/projected/7e3342cd-7b9f-46d2-a977-25659bf9dd3a-kube-api-access-nb8hr\") pod \"7e3342cd-7b9f-46d2-a977-25659bf9dd3a\" (UID: \"7e3342cd-7b9f-46d2-a977-25659bf9dd3a\") " Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.157303 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3342cd-7b9f-46d2-a977-25659bf9dd3a-kube-api-access-nb8hr" (OuterVolumeSpecName: "kube-api-access-nb8hr") pod "7e3342cd-7b9f-46d2-a977-25659bf9dd3a" (UID: "7e3342cd-7b9f-46d2-a977-25659bf9dd3a"). InnerVolumeSpecName "kube-api-access-nb8hr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.247417 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb8hr\" (UniqueName: \"kubernetes.io/projected/7e3342cd-7b9f-46d2-a977-25659bf9dd3a-kube-api-access-nb8hr\") on node \"crc\" DevicePath \"\"" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.273448 4818 generic.go:334] "Generic (PLEG): container finished" podID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerID="86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04" exitCode=0 Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.273477 4818 generic.go:334] "Generic (PLEG): container finished" podID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerID="39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9" exitCode=0 Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.273498 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" event={"ID":"7e3342cd-7b9f-46d2-a977-25659bf9dd3a","Type":"ContainerDied","Data":"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04"} Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.273525 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" event={"ID":"7e3342cd-7b9f-46d2-a977-25659bf9dd3a","Type":"ContainerDied","Data":"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9"} Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.273536 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" event={"ID":"7e3342cd-7b9f-46d2-a977-25659bf9dd3a","Type":"ContainerDied","Data":"715a96cb1b3919b6b306992877186813035111b1547d5e2315f72d1e7fbcd1fb"} Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.273552 4818 scope.go:117] "RemoveContainer" containerID="86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.273557 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.308673 4818 scope.go:117] "RemoveContainer" containerID="39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.313802 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j"] Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.324833 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-ff74f75dc-55p9j"] Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.326596 4818 scope.go:117] "RemoveContainer" containerID="86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04" Sep 30 17:15:58 crc kubenswrapper[4818]: E0930 17:15:58.327087 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04\": container with ID starting with 86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04 not found: ID does not exist" containerID="86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.327119 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04"} err="failed to get container status \"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04\": rpc error: code = NotFound desc = could not find container \"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04\": container with ID starting with 86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04 not found: ID does not exist" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.327139 4818 scope.go:117] "RemoveContainer" containerID="39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9" Sep 30 17:15:58 crc kubenswrapper[4818]: E0930 17:15:58.327366 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9\": container with ID starting with 39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9 not found: ID does not exist" containerID="39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.327389 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9"} err="failed to get container status \"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9\": rpc error: code = NotFound desc = could not find container \"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9\": container with ID starting with 39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9 not found: ID does not exist" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.327403 4818 scope.go:117] "RemoveContainer" containerID="86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.327585 4818 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04"} err="failed to get container status \"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04\": rpc error: code = NotFound desc = could not find container \"86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04\": container with ID starting with 86f63f7f2e232c805c27572f8e6878327b17d4cf447a01424be234ed307c2d04 not found: ID does not exist" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.327599 4818 scope.go:117] "RemoveContainer" containerID="39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9" Sep 30 17:15:58 crc kubenswrapper[4818]: I0930 17:15:58.327792 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9"} err="failed to get container status \"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9\": rpc error: code = NotFound desc = could not find container \"39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9\": container with ID starting with 39d4ea375ee35715369688e9eba44ce926e2f33e8cc2f70f9acead750d3791e9 not found: ID does not exist" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.697343 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-54745555-zlxdf"] Sep 30 17:15:59 crc kubenswrapper[4818]: E0930 17:15:59.699015 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="manager" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.699110 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="manager" Sep 30 17:15:59 crc kubenswrapper[4818]: E0930 17:15:59.699211 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="kube-rbac-proxy" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.699286 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="kube-rbac-proxy" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.699562 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="manager" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.699649 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" containerName="kube-rbac-proxy" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.700654 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.723908 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-54745555-zlxdf"] Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.872945 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7373131a-0a63-4ecd-a6ff-450382e6011b-apiservice-cert\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.873190 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7373131a-0a63-4ecd-a6ff-450382e6011b-webhook-cert\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.873297 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnv4v\" (UniqueName: \"kubernetes.io/projected/7373131a-0a63-4ecd-a6ff-450382e6011b-kube-api-access-lnv4v\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.974161 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7373131a-0a63-4ecd-a6ff-450382e6011b-apiservice-cert\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.974224 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7373131a-0a63-4ecd-a6ff-450382e6011b-webhook-cert\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.974284 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnv4v\" (UniqueName: \"kubernetes.io/projected/7373131a-0a63-4ecd-a6ff-450382e6011b-kube-api-access-lnv4v\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.978282 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7373131a-0a63-4ecd-a6ff-450382e6011b-apiservice-cert\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.980734 4818 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7373131a-0a63-4ecd-a6ff-450382e6011b-webhook-cert\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:15:59 crc kubenswrapper[4818]: I0930 17:15:59.993153 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnv4v\" (UniqueName: \"kubernetes.io/projected/7373131a-0a63-4ecd-a6ff-450382e6011b-kube-api-access-lnv4v\") pod \"watcher-operator-controller-manager-54745555-zlxdf\" (UID: \"7373131a-0a63-4ecd-a6ff-450382e6011b\") " pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:16:00 crc kubenswrapper[4818]: I0930 17:16:00.024300 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:16:00 crc kubenswrapper[4818]: I0930 17:16:00.034773 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e3342cd-7b9f-46d2-a977-25659bf9dd3a" path="/var/lib/kubelet/pods/7e3342cd-7b9f-46d2-a977-25659bf9dd3a/volumes" Sep 30 17:16:00 crc kubenswrapper[4818]: I0930 17:16:00.526762 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-54745555-zlxdf"] Sep 30 17:16:01 crc kubenswrapper[4818]: I0930 17:16:01.302196 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" event={"ID":"7373131a-0a63-4ecd-a6ff-450382e6011b","Type":"ContainerStarted","Data":"3364d459f759a100fd8e521c1fadeaa8233e6cf223138123ce7c527a466f3db6"} Sep 30 17:16:01 crc kubenswrapper[4818]: I0930 17:16:01.302568 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" event={"ID":"7373131a-0a63-4ecd-a6ff-450382e6011b","Type":"ContainerStarted","Data":"078126c36780f268a0004bb18ef24f46257d06d2db4929645cd389c40b07df9b"} Sep 30 17:16:01 crc kubenswrapper[4818]: I0930 17:16:01.302608 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:16:01 crc kubenswrapper[4818]: I0930 17:16:01.302623 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" event={"ID":"7373131a-0a63-4ecd-a6ff-450382e6011b","Type":"ContainerStarted","Data":"51ceeec83d3c7bb45127c96e50611689cc5a0655b7f7807cc5312c28cb5037e3"} Sep 30 17:16:01 crc kubenswrapper[4818]: I0930 17:16:01.329394 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" podStartSLOduration=2.329379323 podStartE2EDuration="2.329379323s" podCreationTimestamp="2025-09-30 17:15:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:16:01.325621112 +0000 UTC m=+1008.079892928" watchObservedRunningTime="2025-09-30 17:16:01.329379323 +0000 UTC m=+1008.083651139" Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.029676 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-54745555-zlxdf" Sep 30 17:16:10 crc kubenswrapper[4818]: 
I0930 17:16:10.096338 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz"] Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.096618 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerName="manager" containerID="cri-o://dafe5551f24e5726d41ffdb01726f8ece787bd4c67b9faf653aaddf358756152" gracePeriod=10 Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.096676 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerName="kube-rbac-proxy" containerID="cri-o://3b2ede703c8cd6df74cd675c53c7cd9abe7e0b9ddf929186590318d5a10ea5bb" gracePeriod=10 Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.378958 4818 generic.go:334] "Generic (PLEG): container finished" podID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerID="3b2ede703c8cd6df74cd675c53c7cd9abe7e0b9ddf929186590318d5a10ea5bb" exitCode=0 Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.379222 4818 generic.go:334] "Generic (PLEG): container finished" podID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerID="dafe5551f24e5726d41ffdb01726f8ece787bd4c67b9faf653aaddf358756152" exitCode=0 Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.379241 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" event={"ID":"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c","Type":"ContainerDied","Data":"3b2ede703c8cd6df74cd675c53c7cd9abe7e0b9ddf929186590318d5a10ea5bb"} Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.379264 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" event={"ID":"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c","Type":"ContainerDied","Data":"dafe5551f24e5726d41ffdb01726f8ece787bd4c67b9faf653aaddf358756152"} Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.573518 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.731538 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rcjg\" (UniqueName: \"kubernetes.io/projected/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-kube-api-access-9rcjg\") pod \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.731649 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-webhook-cert\") pod \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.731709 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-apiservice-cert\") pod \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\" (UID: \"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c\") " Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.736990 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" (UID: "5ae87be5-c01b-4ae2-a11e-b43ee3329e2c"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.737177 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-kube-api-access-9rcjg" (OuterVolumeSpecName: "kube-api-access-9rcjg") pod "5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" (UID: "5ae87be5-c01b-4ae2-a11e-b43ee3329e2c"). InnerVolumeSpecName "kube-api-access-9rcjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.741228 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" (UID: "5ae87be5-c01b-4ae2-a11e-b43ee3329e2c"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.833431 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rcjg\" (UniqueName: \"kubernetes.io/projected/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-kube-api-access-9rcjg\") on node \"crc\" DevicePath \"\"" Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.833476 4818 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-webhook-cert\") on node \"crc\" DevicePath \"\"" Sep 30 17:16:10 crc kubenswrapper[4818]: I0930 17:16:10.833494 4818 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c-apiservice-cert\") on node \"crc\" DevicePath \"\"" Sep 30 17:16:11 crc kubenswrapper[4818]: I0930 17:16:11.390962 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" event={"ID":"5ae87be5-c01b-4ae2-a11e-b43ee3329e2c","Type":"ContainerDied","Data":"c2bcd78aaad185b8a52bc81e2b7f8c9f44445ac1928f3114c61272134b50dc0c"} Sep 30 17:16:11 crc kubenswrapper[4818]: I0930 17:16:11.391051 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz" Sep 30 17:16:11 crc kubenswrapper[4818]: I0930 17:16:11.391072 4818 scope.go:117] "RemoveContainer" containerID="3b2ede703c8cd6df74cd675c53c7cd9abe7e0b9ddf929186590318d5a10ea5bb" Sep 30 17:16:11 crc kubenswrapper[4818]: I0930 17:16:11.425180 4818 scope.go:117] "RemoveContainer" containerID="dafe5551f24e5726d41ffdb01726f8ece787bd4c67b9faf653aaddf358756152" Sep 30 17:16:11 crc kubenswrapper[4818]: I0930 17:16:11.439156 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz"] Sep 30 17:16:11 crc kubenswrapper[4818]: I0930 17:16:11.446390 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-f4484ff7b-bgtxz"] Sep 30 17:16:12 crc kubenswrapper[4818]: I0930 17:16:12.028932 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" path="/var/lib/kubelet/pods/5ae87be5-c01b-4ae2-a11e-b43ee3329e2c/volumes" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.106406 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/rabbitmq-server-0"] Sep 30 17:16:23 crc kubenswrapper[4818]: E0930 17:16:23.107498 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerName="kube-rbac-proxy" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.107520 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerName="kube-rbac-proxy" Sep 30 17:16:23 crc kubenswrapper[4818]: E0930 17:16:23.107584 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerName="manager" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.107597 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerName="manager" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.107822 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" 
containerName="manager" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.107848 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ae87be5-c01b-4ae2-a11e-b43ee3329e2c" containerName="kube-rbac-proxy" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.109044 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.111627 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-default-user" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.114125 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-erlang-cookie" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.114247 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-config-data" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.114763 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openshift-service-ca.crt" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.114845 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-plugins-conf" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.115034 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-rabbitmq-svc" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.115044 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-server-conf" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.115262 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-server-dockercfg-zdr8v" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.115393 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"kube-root-ca.crt" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.127378 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-server-0"] Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234372 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234422 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234601 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234726 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234776 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234856 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d2r5\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-kube-api-access-6d2r5\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234904 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f707c20f-09e2-4aa7-9a18-5b37f2050e45-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234948 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f707c20f-09e2-4aa7-9a18-5b37f2050e45-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.234980 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.235017 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.235050 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-config-data\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.336456 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.336813 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.337056 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.337246 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d2r5\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-kube-api-access-6d2r5\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.337410 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f707c20f-09e2-4aa7-9a18-5b37f2050e45-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.337589 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f707c20f-09e2-4aa7-9a18-5b37f2050e45-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.337782 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.337994 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.338173 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-config-data\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.338351 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.338512 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.337766 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.338760 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.338962 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-config-data\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.339122 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f707c20f-09e2-4aa7-9a18-5b37f2050e45-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.339209 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.344206 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.352810 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f707c20f-09e2-4aa7-9a18-5b37f2050e45-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.352866 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f707c20f-09e2-4aa7-9a18-5b37f2050e45-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.354316 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 
17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.357557 4818 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.357615 4818 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c45ca86a6a27e94f5e98812a2928c216f9b0cd49a74cbf2dcffcd7e1d2368869/globalmount\"" pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.367613 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d2r5\" (UniqueName: \"kubernetes.io/projected/f707c20f-09e2-4aa7-9a18-5b37f2050e45-kube-api-access-6d2r5\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.371108 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/rabbitmq-notifications-server-0"] Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.372888 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.376714 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-notifications-erlang-cookie" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.376869 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-notifications-server-dockercfg-mgk7w" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.377084 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-notifications-server-conf" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.377228 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-notifications-config-data" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.377655 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-notifications-default-user" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.378378 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-notifications-plugins-conf" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.380670 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-rabbitmq-notifications-svc" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.386017 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-notifications-server-0"] Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.424995 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a70d582-f91e-43fa-b471-9aaef7fc6b7d\") pod \"rabbitmq-server-0\" (UID: \"f707c20f-09e2-4aa7-9a18-5b37f2050e45\") " pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.430581 
4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543107 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/68a4cecf-f627-497d-a682-5092ea0b3298-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543195 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48sxl\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-kube-api-access-48sxl\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543230 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543328 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543357 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543388 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543485 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543548 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " 
pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543578 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/68a4cecf-f627-497d-a682-5092ea0b3298-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543593 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.543625 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.646572 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48sxl\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-kube-api-access-48sxl\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.646874 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.646939 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.646968 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.646995 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.647039 4818 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.647068 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.647088 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/68a4cecf-f627-497d-a682-5092ea0b3298-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.647101 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.647119 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.647138 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/68a4cecf-f627-497d-a682-5092ea0b3298-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.655949 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/68a4cecf-f627-497d-a682-5092ea0b3298-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.656451 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.656627 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 
17:16:23.657945 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.658191 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.668311 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/68a4cecf-f627-497d-a682-5092ea0b3298-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.671551 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/68a4cecf-f627-497d-a682-5092ea0b3298-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.672134 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.676866 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.690197 4818 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.690239 4818 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/30d9c3a58059e1e21c6808c01e880372615cbd4a83cfdb5e89d3bccc1d5203b5/globalmount\"" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.712906 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48sxl\" (UniqueName: \"kubernetes.io/projected/68a4cecf-f627-497d-a682-5092ea0b3298-kube-api-access-48sxl\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:23 crc kubenswrapper[4818]: I0930 17:16:23.834787 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e2f333f3-e848-4815-b8c1-88ea7d03d1a7\") pod \"rabbitmq-notifications-server-0\" (UID: \"68a4cecf-f627-497d-a682-5092ea0b3298\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:24 crc kubenswrapper[4818]: W0930 17:16:24.027462 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf707c20f_09e2_4aa7_9a18_5b37f2050e45.slice/crio-4744057a6d153ba50f9fc98ce3f090169b844a90be6cae0f15e5761eaadfddbd WatchSource:0}: Error finding container 4744057a6d153ba50f9fc98ce3f090169b844a90be6cae0f15e5761eaadfddbd: Status 404 returned error can't find the container with id 4744057a6d153ba50f9fc98ce3f090169b844a90be6cae0f15e5761eaadfddbd Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.028353 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-server-0"] Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.127972 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.371438 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-notifications-server-0"] Sep 30 17:16:24 crc kubenswrapper[4818]: W0930 17:16:24.379531 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68a4cecf_f627_497d_a682_5092ea0b3298.slice/crio-48222abd90d86d2c93526116d64c3800646da0e91b4589c48ca096feca5892f9 WatchSource:0}: Error finding container 48222abd90d86d2c93526116d64c3800646da0e91b4589c48ca096feca5892f9: Status 404 returned error can't find the container with id 48222abd90d86d2c93526116d64c3800646da0e91b4589c48ca096feca5892f9 Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.507803 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-server-0" event={"ID":"f707c20f-09e2-4aa7-9a18-5b37f2050e45","Type":"ContainerStarted","Data":"4744057a6d153ba50f9fc98ce3f090169b844a90be6cae0f15e5761eaadfddbd"} Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.509092 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"68a4cecf-f627-497d-a682-5092ea0b3298","Type":"ContainerStarted","Data":"48222abd90d86d2c93526116d64c3800646da0e91b4589c48ca096feca5892f9"} Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.830241 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/openstack-galera-0"] Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.831641 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.836820 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"galera-openstack-dockercfg-p96wj" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.837377 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"osp-secret" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.837629 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openstack-scripts" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.837835 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-galera-openstack-svc" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.840234 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openstack-config-data" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.848794 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"combined-ca-bundle" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.850899 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstack-galera-0"] Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.968344 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.970787 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972003 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpbcw\" (UniqueName: \"kubernetes.io/projected/7a7b6023-3d68-4aa9-a911-59017220edbf-kube-api-access-hpbcw\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972062 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972088 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972115 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-secrets\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972139 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-kolla-config\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972153 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972173 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7a7b6023-3d68-4aa9-a911-59017220edbf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972194 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-config-data-default\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.972244 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0c0b78be-959b-4046-8d49-ecf87197a221\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0b78be-959b-4046-8d49-ecf87197a221\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.977273 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"memcached-memcached-dockercfg-xj47j" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.977506 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.977551 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"memcached-config-data" Sep 30 17:16:24 crc kubenswrapper[4818]: I0930 17:16:24.977654 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-memcached-svc" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.073469 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0c0b78be-959b-4046-8d49-ecf87197a221\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0b78be-959b-4046-8d49-ecf87197a221\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.073844 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb5bk\" (UniqueName: \"kubernetes.io/projected/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kube-api-access-lb5bk\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.073952 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpbcw\" (UniqueName: \"kubernetes.io/projected/7a7b6023-3d68-4aa9-a911-59017220edbf-kube-api-access-hpbcw\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.073983 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074096 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074132 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074334 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kolla-config\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074379 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-secrets\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074611 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-config-data\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074637 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074656 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-kolla-config\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074839 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074859 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7a7b6023-3d68-4aa9-a911-59017220edbf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.074878 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-config-data-default\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.075795 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-kolla-config\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.076784 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7a7b6023-3d68-4aa9-a911-59017220edbf-config-data-generated\") pod \"openstack-galera-0\" (UID: 
\"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.077265 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-config-data-default\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.077509 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7b6023-3d68-4aa9-a911-59017220edbf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.079433 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.086102 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.088816 4818 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.088869 4818 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0c0b78be-959b-4046-8d49-ecf87197a221\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0b78be-959b-4046-8d49-ecf87197a221\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f90f5711806aa39bfbc45153bb2e1332435561835ba1a85e3522794fce950f40/globalmount\"" pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.091797 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/7a7b6023-3d68-4aa9-a911-59017220edbf-secrets\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.093401 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpbcw\" (UniqueName: \"kubernetes.io/projected/7a7b6023-3d68-4aa9-a911-59017220edbf-kube-api-access-hpbcw\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.138193 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0c0b78be-959b-4046-8d49-ecf87197a221\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0b78be-959b-4046-8d49-ecf87197a221\") pod \"openstack-galera-0\" (UID: \"7a7b6023-3d68-4aa9-a911-59017220edbf\") " pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.155878 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/openstack-galera-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.177753 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb5bk\" (UniqueName: \"kubernetes.io/projected/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kube-api-access-lb5bk\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.177815 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.177842 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kolla-config\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.177867 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-config-data\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.177884 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.180820 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-config-data\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.181002 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kolla-config\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.182253 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.183353 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.195070 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb5bk\" (UniqueName: 
\"kubernetes.io/projected/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kube-api-access-lb5bk\") pod \"memcached-0\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.292783 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.318620 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.319681 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.331495 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.335870 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"telemetry-ceilometer-dockercfg-lcqj4" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.394244 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54vp5\" (UniqueName: \"kubernetes.io/projected/808a26d0-141a-4e9e-8920-933c31423097-kube-api-access-54vp5\") pod \"kube-state-metrics-0\" (UID: \"808a26d0-141a-4e9e-8920-933c31423097\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.495999 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54vp5\" (UniqueName: \"kubernetes.io/projected/808a26d0-141a-4e9e-8920-933c31423097-kube-api-access-54vp5\") pod \"kube-state-metrics-0\" (UID: \"808a26d0-141a-4e9e-8920-933c31423097\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.524604 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54vp5\" (UniqueName: \"kubernetes.io/projected/808a26d0-141a-4e9e-8920-933c31423097-kube-api-access-54vp5\") pod \"kube-state-metrics-0\" (UID: \"808a26d0-141a-4e9e-8920-933c31423097\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.741331 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.756917 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstack-galera-0"] Sep 30 17:16:25 crc kubenswrapper[4818]: W0930 17:16:25.775012 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a7b6023_3d68_4aa9_a911_59017220edbf.slice/crio-32060f7200509d77c5f12a7be62e1e49b9f2e7a0185bf415424b8f935344dabf WatchSource:0}: Error finding container 32060f7200509d77c5f12a7be62e1e49b9f2e7a0185bf415424b8f935344dabf: Status 404 returned error can't find the container with id 32060f7200509d77c5f12a7be62e1e49b9f2e7a0185bf415424b8f935344dabf Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.885659 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/alertmanager-metric-storage-0"] Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.888775 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.893132 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"alertmanager-metric-storage-generated" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.896764 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"alertmanager-metric-storage-web-config" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.896979 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"alertmanager-metric-storage-tls-assets-0" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.897133 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"metric-storage-alertmanager-dockercfg-rjhc2" Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.906661 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/alertmanager-metric-storage-0"] Sep 30 17:16:25 crc kubenswrapper[4818]: I0930 17:16:25.942955 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:16:25 crc kubenswrapper[4818]: W0930 17:16:25.960886 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded2f758c_c27c_4554_bcb1_c8be8a0e2e55.slice/crio-b55f6e239085ca48692772ed9621b73d67ded65fcbe033c2563097c1647f912c WatchSource:0}: Error finding container b55f6e239085ca48692772ed9621b73d67ded65fcbe033c2563097c1647f912c: Status 404 returned error can't find the container with id b55f6e239085ca48692772ed9621b73d67ded65fcbe033c2563097c1647f912c Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.005529 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/81366e0f-12de-49bd-8834-68b2d0da319b-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.005615 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/81366e0f-12de-49bd-8834-68b2d0da319b-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.005652 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/81366e0f-12de-49bd-8834-68b2d0da319b-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.005802 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl2n4\" (UniqueName: \"kubernetes.io/projected/81366e0f-12de-49bd-8834-68b2d0da319b-kube-api-access-pl2n4\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.005881 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/81366e0f-12de-49bd-8834-68b2d0da319b-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.006086 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/81366e0f-12de-49bd-8834-68b2d0da319b-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.107662 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/81366e0f-12de-49bd-8834-68b2d0da319b-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.107710 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/81366e0f-12de-49bd-8834-68b2d0da319b-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.107753 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl2n4\" (UniqueName: \"kubernetes.io/projected/81366e0f-12de-49bd-8834-68b2d0da319b-kube-api-access-pl2n4\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.107777 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/81366e0f-12de-49bd-8834-68b2d0da319b-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.107823 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/81366e0f-12de-49bd-8834-68b2d0da319b-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.107847 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/81366e0f-12de-49bd-8834-68b2d0da319b-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.108275 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/81366e0f-12de-49bd-8834-68b2d0da319b-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " 
pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.117734 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/81366e0f-12de-49bd-8834-68b2d0da319b-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.117764 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/81366e0f-12de-49bd-8834-68b2d0da319b-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.117806 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/81366e0f-12de-49bd-8834-68b2d0da319b-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.120439 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/81366e0f-12de-49bd-8834-68b2d0da319b-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.128253 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl2n4\" (UniqueName: \"kubernetes.io/projected/81366e0f-12de-49bd-8834-68b2d0da319b-kube-api-access-pl2n4\") pod \"alertmanager-metric-storage-0\" (UID: \"81366e0f-12de-49bd-8834-68b2d0da319b\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.223949 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/alertmanager-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.235355 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.236430 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.241506 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.241705 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-5mvns" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.265638 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.304651 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.310976 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/576f7b1f-338a-4be8-a516-10ef07224f16-serving-cert\") pod \"observability-ui-dashboards-6584dc9448-hfpnh\" (UID: \"576f7b1f-338a-4be8-a516-10ef07224f16\") " pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.311041 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shtpb\" (UniqueName: \"kubernetes.io/projected/576f7b1f-338a-4be8-a516-10ef07224f16-kube-api-access-shtpb\") pod \"observability-ui-dashboards-6584dc9448-hfpnh\" (UID: \"576f7b1f-338a-4be8-a516-10ef07224f16\") " pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.412010 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/576f7b1f-338a-4be8-a516-10ef07224f16-serving-cert\") pod \"observability-ui-dashboards-6584dc9448-hfpnh\" (UID: \"576f7b1f-338a-4be8-a516-10ef07224f16\") " pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.412421 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shtpb\" (UniqueName: \"kubernetes.io/projected/576f7b1f-338a-4be8-a516-10ef07224f16-kube-api-access-shtpb\") pod \"observability-ui-dashboards-6584dc9448-hfpnh\" (UID: \"576f7b1f-338a-4be8-a516-10ef07224f16\") " pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: E0930 17:16:26.412224 4818 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Sep 30 17:16:26 crc kubenswrapper[4818]: E0930 17:16:26.412527 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/576f7b1f-338a-4be8-a516-10ef07224f16-serving-cert podName:576f7b1f-338a-4be8-a516-10ef07224f16 nodeName:}" failed. No retries permitted until 2025-09-30 17:16:26.912500198 +0000 UTC m=+1033.666772094 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/576f7b1f-338a-4be8-a516-10ef07224f16-serving-cert") pod "observability-ui-dashboards-6584dc9448-hfpnh" (UID: "576f7b1f-338a-4be8-a516-10ef07224f16") : secret "observability-ui-dashboards" not found Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.449790 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shtpb\" (UniqueName: \"kubernetes.io/projected/576f7b1f-338a-4be8-a516-10ef07224f16-kube-api-access-shtpb\") pod \"observability-ui-dashboards-6584dc9448-hfpnh\" (UID: \"576f7b1f-338a-4be8-a516-10ef07224f16\") " pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.573446 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.575867 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.581368 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.581536 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.581681 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-tls-assets-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.584514 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.584700 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"metric-storage-prometheus-dockercfg-cfxgj" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.584816 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-web-config" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.588266 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"808a26d0-141a-4e9e-8920-933c31423097","Type":"ContainerStarted","Data":"b2273c16b344157bbfb2b455f500155861652d4b92fc8cc889564b55a5e07225"} Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.599204 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"7a7b6023-3d68-4aa9-a911-59017220edbf","Type":"ContainerStarted","Data":"32060f7200509d77c5f12a7be62e1e49b9f2e7a0185bf415424b8f935344dabf"} Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.600447 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.601583 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55","Type":"ContainerStarted","Data":"b55f6e239085ca48692772ed9621b73d67ded65fcbe033c2563097c1647f912c"} Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.654097 4818 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-console/console-7789fdbb8c-6m8b8"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.655025 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.681550 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7789fdbb8c-6m8b8"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726124 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2be08df0-9fca-4f67-b75a-e6b2cd35767d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726185 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726227 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-oauth-serving-cert\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726288 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c693263b-256a-439d-90ce-21357f715bb5-console-serving-cert\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726308 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-service-ca\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726340 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-console-config\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726364 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zqnv\" (UniqueName: \"kubernetes.io/projected/c693263b-256a-439d-90ce-21357f715bb5-kube-api-access-4zqnv\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726386 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/c693263b-256a-439d-90ce-21357f715bb5-console-oauth-config\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726486 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726562 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-trusted-ca-bundle\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726581 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726676 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.726717 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.727473 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5q7m\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-kube-api-access-g5q7m\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.727536 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829253 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " 
pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829307 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-oauth-serving-cert\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829352 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c693263b-256a-439d-90ce-21357f715bb5-console-serving-cert\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829375 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-service-ca\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829397 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-console-config\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829418 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zqnv\" (UniqueName: \"kubernetes.io/projected/c693263b-256a-439d-90ce-21357f715bb5-kube-api-access-4zqnv\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829439 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c693263b-256a-439d-90ce-21357f715bb5-console-oauth-config\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829463 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829480 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-trusted-ca-bundle\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829498 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829528 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829546 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829568 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5q7m\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-kube-api-access-g5q7m\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829602 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.829622 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2be08df0-9fca-4f67-b75a-e6b2cd35767d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.830366 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2be08df0-9fca-4f67-b75a-e6b2cd35767d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.837063 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-console-config\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.855106 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-service-ca\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.856536 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-oauth-serving-cert\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.858839 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c693263b-256a-439d-90ce-21357f715bb5-trusted-ca-bundle\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.875691 4818 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.875739 4818 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9122b16786948abe01c7647c3bcde8c957fd8b046438da381e77282d5c0b955d/globalmount\"" pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.884147 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5q7m\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-kube-api-access-g5q7m\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.885370 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.917180 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zqnv\" (UniqueName: \"kubernetes.io/projected/c693263b-256a-439d-90ce-21357f715bb5-kube-api-access-4zqnv\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.932124 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.932519 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/576f7b1f-338a-4be8-a516-10ef07224f16-serving-cert\") pod \"observability-ui-dashboards-6584dc9448-hfpnh\" (UID: \"576f7b1f-338a-4be8-a516-10ef07224f16\") " pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.940539 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" 
(UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.942286 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c693263b-256a-439d-90ce-21357f715bb5-console-oauth-config\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.942860 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/576f7b1f-338a-4be8-a516-10ef07224f16-serving-cert\") pod \"observability-ui-dashboards-6584dc9448-hfpnh\" (UID: \"576f7b1f-338a-4be8-a516-10ef07224f16\") " pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.944641 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.945457 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c693263b-256a-439d-90ce-21357f715bb5-console-serving-cert\") pod \"console-7789fdbb8c-6m8b8\" (UID: \"c693263b-256a-439d-90ce-21357f715bb5\") " pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.946396 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.967155 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/alertmanager-metric-storage-0"] Sep 30 17:16:26 crc kubenswrapper[4818]: I0930 17:16:26.997840 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:27 crc kubenswrapper[4818]: I0930 17:16:27.121965 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:27 crc kubenswrapper[4818]: I0930 17:16:27.177484 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" Sep 30 17:16:27 crc kubenswrapper[4818]: I0930 17:16:27.210678 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:16:27 crc kubenswrapper[4818]: I0930 17:16:27.611203 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"81366e0f-12de-49bd-8834-68b2d0da319b","Type":"ContainerStarted","Data":"6be47814b57f136d0f6cf68aeec3ae481ae56f9627b6bef888795023f174a53a"} Sep 30 17:16:27 crc kubenswrapper[4818]: I0930 17:16:27.629050 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7789fdbb8c-6m8b8"] Sep 30 17:16:28 crc kubenswrapper[4818]: I0930 17:16:28.059377 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh"] Sep 30 17:16:28 crc kubenswrapper[4818]: I0930 17:16:28.140110 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:16:28 crc kubenswrapper[4818]: W0930 17:16:28.379867 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc693263b_256a_439d_90ce_21357f715bb5.slice/crio-d8e6472dbb5b1565954fb3de291b19cce7d83b4cebd07ce01cc8ec062a198d1d WatchSource:0}: Error finding container d8e6472dbb5b1565954fb3de291b19cce7d83b4cebd07ce01cc8ec062a198d1d: Status 404 returned error can't find the container with id d8e6472dbb5b1565954fb3de291b19cce7d83b4cebd07ce01cc8ec062a198d1d Sep 30 17:16:28 crc kubenswrapper[4818]: W0930 17:16:28.381672 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod576f7b1f_338a_4be8_a516_10ef07224f16.slice/crio-47cd955dd4bec527e10fef2956343cedc2d8bee9eb60ccff3679311bcc06b5ab WatchSource:0}: Error finding container 47cd955dd4bec527e10fef2956343cedc2d8bee9eb60ccff3679311bcc06b5ab: Status 404 returned error can't find the container with id 47cd955dd4bec527e10fef2956343cedc2d8bee9eb60ccff3679311bcc06b5ab Sep 30 17:16:28 crc kubenswrapper[4818]: I0930 17:16:28.629172 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerStarted","Data":"fde2d345a9979ec0acba75c9a672285afa9ef5d430865612c92557943fbbb7df"} Sep 30 17:16:28 crc kubenswrapper[4818]: I0930 17:16:28.630308 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7789fdbb8c-6m8b8" event={"ID":"c693263b-256a-439d-90ce-21357f715bb5","Type":"ContainerStarted","Data":"d8e6472dbb5b1565954fb3de291b19cce7d83b4cebd07ce01cc8ec062a198d1d"} Sep 30 17:16:28 crc kubenswrapper[4818]: I0930 17:16:28.631179 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" event={"ID":"576f7b1f-338a-4be8-a516-10ef07224f16","Type":"ContainerStarted","Data":"47cd955dd4bec527e10fef2956343cedc2d8bee9eb60ccff3679311bcc06b5ab"} Sep 30 17:16:32 crc kubenswrapper[4818]: I0930 17:16:32.667855 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7789fdbb8c-6m8b8" event={"ID":"c693263b-256a-439d-90ce-21357f715bb5","Type":"ContainerStarted","Data":"e43101b7db69c5b9f1626f546a30fa9a89567cc779ca61b790a86f1ee2400873"} Sep 30 17:16:32 crc kubenswrapper[4818]: I0930 17:16:32.695303 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7789fdbb8c-6m8b8" podStartSLOduration=6.695280114 
podStartE2EDuration="6.695280114s" podCreationTimestamp="2025-09-30 17:16:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:16:32.691161223 +0000 UTC m=+1039.445433069" watchObservedRunningTime="2025-09-30 17:16:32.695280114 +0000 UTC m=+1039.449551940" Sep 30 17:16:36 crc kubenswrapper[4818]: I0930 17:16:36.999150 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:36 crc kubenswrapper[4818]: I0930 17:16:36.999489 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:37 crc kubenswrapper[4818]: I0930 17:16:37.003690 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:37 crc kubenswrapper[4818]: I0930 17:16:37.712857 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7789fdbb8c-6m8b8" Sep 30 17:16:37 crc kubenswrapper[4818]: I0930 17:16:37.768142 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-54465874f9-657tn"] Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.727366 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"808a26d0-141a-4e9e-8920-933c31423097","Type":"ContainerStarted","Data":"5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea"} Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.729604 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.737437 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"7a7b6023-3d68-4aa9-a911-59017220edbf","Type":"ContainerStarted","Data":"807b3eb0cb1c31a2f6f382ab8736b332ba4998840b6f1a8dbe9603117b035b0f"} Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.739370 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55","Type":"ContainerStarted","Data":"a3df436dd4e579476af80c23e04a26cd1b2a4d56e5694dffe2507520eb42dc29"} Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.739880 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/memcached-0" Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.741093 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" event={"ID":"576f7b1f-338a-4be8-a516-10ef07224f16","Type":"ContainerStarted","Data":"abc1d12da869bfe89013aae71c65a982cde2e1f860b9ee757188cfce578ec0c7"} Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.742747 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"68a4cecf-f627-497d-a682-5092ea0b3298","Type":"ContainerStarted","Data":"93ba45143754109a4ad420cd007cbc0eee8def3286d9c62258b0365114cc8400"} Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.744569 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-server-0" event={"ID":"f707c20f-09e2-4aa7-9a18-5b37f2050e45","Type":"ContainerStarted","Data":"2e142b77678decc1befcd1414b7a0a72a65e60fc1dbbb46829e8e7d0e05a73af"} 
Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.746769 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/kube-state-metrics-0" podStartSLOduration=3.046561078 podStartE2EDuration="14.746745459s" podCreationTimestamp="2025-09-30 17:16:25 +0000 UTC" firstStartedPulling="2025-09-30 17:16:26.331445307 +0000 UTC m=+1033.085717123" lastFinishedPulling="2025-09-30 17:16:38.031629688 +0000 UTC m=+1044.785901504" observedRunningTime="2025-09-30 17:16:39.74570684 +0000 UTC m=+1046.499978706" watchObservedRunningTime="2025-09-30 17:16:39.746745459 +0000 UTC m=+1046.501017285"
Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.765580 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-6584dc9448-hfpnh" podStartSLOduration=3.55416467 podStartE2EDuration="13.765562787s" podCreationTimestamp="2025-09-30 17:16:26 +0000 UTC" firstStartedPulling="2025-09-30 17:16:28.384723859 +0000 UTC m=+1035.138995675" lastFinishedPulling="2025-09-30 17:16:38.596121956 +0000 UTC m=+1045.350393792" observedRunningTime="2025-09-30 17:16:39.760737727 +0000 UTC m=+1046.515009543" watchObservedRunningTime="2025-09-30 17:16:39.765562787 +0000 UTC m=+1046.519834613"
Sep 30 17:16:39 crc kubenswrapper[4818]: I0930 17:16:39.823246 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/memcached-0" podStartSLOduration=3.315148846 podStartE2EDuration="15.823230075s" podCreationTimestamp="2025-09-30 17:16:24 +0000 UTC" firstStartedPulling="2025-09-30 17:16:25.963193203 +0000 UTC m=+1032.717465019" lastFinishedPulling="2025-09-30 17:16:38.471274432 +0000 UTC m=+1045.225546248" observedRunningTime="2025-09-30 17:16:39.788394774 +0000 UTC m=+1046.542666620" watchObservedRunningTime="2025-09-30 17:16:39.823230075 +0000 UTC m=+1046.577501891"
Sep 30 17:16:41 crc kubenswrapper[4818]: I0930 17:16:41.761682 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"81366e0f-12de-49bd-8834-68b2d0da319b","Type":"ContainerStarted","Data":"8f7b6c7757173ce5242345e53e8128a254975333df50dbaea5c9557b3db8a489"}
Sep 30 17:16:41 crc kubenswrapper[4818]: I0930 17:16:41.765753 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerStarted","Data":"c34297f32613b332296e0dc5de926f272afc6e5a3dfc0ab275d3c79a153763fa"}
Sep 30 17:16:43 crc kubenswrapper[4818]: I0930 17:16:43.789315 4818 generic.go:334] "Generic (PLEG): container finished" podID="7a7b6023-3d68-4aa9-a911-59017220edbf" containerID="807b3eb0cb1c31a2f6f382ab8736b332ba4998840b6f1a8dbe9603117b035b0f" exitCode=0
Sep 30 17:16:43 crc kubenswrapper[4818]: I0930 17:16:43.789428 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"7a7b6023-3d68-4aa9-a911-59017220edbf","Type":"ContainerDied","Data":"807b3eb0cb1c31a2f6f382ab8736b332ba4998840b6f1a8dbe9603117b035b0f"}
Sep 30 17:16:44 crc kubenswrapper[4818]: I0930 17:16:44.798632 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"7a7b6023-3d68-4aa9-a911-59017220edbf","Type":"ContainerStarted","Data":"d1e51d12b6123d31afa177fd5c960b81a17c8169a9c9ed80f82dbc5a1d8c70d1"}
Sep 30 17:16:44 crc kubenswrapper[4818]: I0930 17:16:44.833692 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/openstack-galera-0" podStartSLOduration=9.030760611 podStartE2EDuration="21.833674049s" podCreationTimestamp="2025-09-30 17:16:23 +0000 UTC" firstStartedPulling="2025-09-30 17:16:25.783352222 +0000 UTC m=+1032.537624038" lastFinishedPulling="2025-09-30 17:16:38.58626566 +0000 UTC m=+1045.340537476" observedRunningTime="2025-09-30 17:16:44.832040855 +0000 UTC m=+1051.586312671" watchObservedRunningTime="2025-09-30 17:16:44.833674049 +0000 UTC m=+1051.587945865"
Sep 30 17:16:45 crc kubenswrapper[4818]: I0930 17:16:45.156421 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/openstack-galera-0"
Sep 30 17:16:45 crc kubenswrapper[4818]: I0930 17:16:45.156470 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/openstack-galera-0"
Sep 30 17:16:45 crc kubenswrapper[4818]: I0930 17:16:45.294184 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/memcached-0"
Sep 30 17:16:45 crc kubenswrapper[4818]: I0930 17:16:45.747494 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/kube-state-metrics-0"
Sep 30 17:16:49 crc kubenswrapper[4818]: I0930 17:16:49.239476 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/openstack-galera-0"
Sep 30 17:16:49 crc kubenswrapper[4818]: I0930 17:16:49.319724 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/openstack-galera-0"
Sep 30 17:16:49 crc kubenswrapper[4818]: I0930 17:16:49.832722 4818 generic.go:334] "Generic (PLEG): container finished" podID="81366e0f-12de-49bd-8834-68b2d0da319b" containerID="8f7b6c7757173ce5242345e53e8128a254975333df50dbaea5c9557b3db8a489" exitCode=0
Sep 30 17:16:49 crc kubenswrapper[4818]: I0930 17:16:49.832811 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"81366e0f-12de-49bd-8834-68b2d0da319b","Type":"ContainerDied","Data":"8f7b6c7757173ce5242345e53e8128a254975333df50dbaea5c9557b3db8a489"}
Sep 30 17:16:49 crc kubenswrapper[4818]: I0930 17:16:49.834662 4818 generic.go:334] "Generic (PLEG): container finished" podID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerID="c34297f32613b332296e0dc5de926f272afc6e5a3dfc0ab275d3c79a153763fa" exitCode=0
Sep 30 17:16:49 crc kubenswrapper[4818]: I0930 17:16:49.835120 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerDied","Data":"c34297f32613b332296e0dc5de926f272afc6e5a3dfc0ab275d3c79a153763fa"}
Sep 30 17:16:52 crc kubenswrapper[4818]: I0930 17:16:52.861968 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"81366e0f-12de-49bd-8834-68b2d0da319b","Type":"ContainerStarted","Data":"6e869842848fcc2bf16311fcaca724d776e5114f12b8e37345e8aa23a30aa83b"}
Sep 30 17:16:54 crc kubenswrapper[4818]: I0930 17:16:54.889209 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"81366e0f-12de-49bd-8834-68b2d0da319b","Type":"ContainerStarted","Data":"f8b2cd3c140a611ede849d118d83438be8d62cc1a3adae936fa0a0a878208041"}
Sep 30 17:16:54 crc kubenswrapper[4818]: I0930 17:16:54.889736 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/alertmanager-metric-storage-0"
Sep 30 17:16:54 crc kubenswrapper[4818]: I0930 17:16:54.921781 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/alertmanager-metric-storage-0" podStartSLOduration=4.495142252 podStartE2EDuration="29.921762984s" podCreationTimestamp="2025-09-30 17:16:25 +0000 UTC" firstStartedPulling="2025-09-30 17:16:26.97824105 +0000 UTC m=+1033.732512866" lastFinishedPulling="2025-09-30 17:16:52.404861782 +0000 UTC m=+1059.159133598" observedRunningTime="2025-09-30 17:16:54.911836766 +0000 UTC m=+1061.666108602" watchObservedRunningTime="2025-09-30 17:16:54.921762984 +0000 UTC m=+1061.676034800"
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.061003 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-db-create-s7x5r"]
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.062515 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-s7x5r"
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.075537 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-create-s7x5r"]
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.132365 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmt8q\" (UniqueName: \"kubernetes.io/projected/51ba880e-f6e2-4aff-99ea-268e68bc0b94-kube-api-access-xmt8q\") pod \"keystone-db-create-s7x5r\" (UID: \"51ba880e-f6e2-4aff-99ea-268e68bc0b94\") " pod="watcher-kuttl-default/keystone-db-create-s7x5r"
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.234152 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmt8q\" (UniqueName: \"kubernetes.io/projected/51ba880e-f6e2-4aff-99ea-268e68bc0b94-kube-api-access-xmt8q\") pod \"keystone-db-create-s7x5r\" (UID: \"51ba880e-f6e2-4aff-99ea-268e68bc0b94\") " pod="watcher-kuttl-default/keystone-db-create-s7x5r"
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.250084 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmt8q\" (UniqueName: \"kubernetes.io/projected/51ba880e-f6e2-4aff-99ea-268e68bc0b94-kube-api-access-xmt8q\") pod \"keystone-db-create-s7x5r\" (UID: \"51ba880e-f6e2-4aff-99ea-268e68bc0b94\") " pod="watcher-kuttl-default/keystone-db-create-s7x5r"
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.392683 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-s7x5r"
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.902713 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerStarted","Data":"94a7b16ce4522659fc8a474795297eddd946756a336b8e3fb23808e918ded234"}
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.907096 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/alertmanager-metric-storage-0"
Sep 30 17:16:55 crc kubenswrapper[4818]: I0930 17:16:55.989203 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-create-s7x5r"]
Sep 30 17:16:56 crc kubenswrapper[4818]: I0930 17:16:56.911989 4818 generic.go:334] "Generic (PLEG): container finished" podID="51ba880e-f6e2-4aff-99ea-268e68bc0b94" containerID="9ca1f0c21f686e7a5eba0904588292a1bc8fdf00120ffc0b01aa6a7e9a3c8387" exitCode=0
Sep 30 17:16:56 crc kubenswrapper[4818]: I0930 17:16:56.912041 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-create-s7x5r" event={"ID":"51ba880e-f6e2-4aff-99ea-268e68bc0b94","Type":"ContainerDied","Data":"9ca1f0c21f686e7a5eba0904588292a1bc8fdf00120ffc0b01aa6a7e9a3c8387"}
Sep 30 17:16:56 crc kubenswrapper[4818]: I0930 17:16:56.912456 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-create-s7x5r" event={"ID":"51ba880e-f6e2-4aff-99ea-268e68bc0b94","Type":"ContainerStarted","Data":"cf419ad599ccd6a4da989d2d163df56c693762e893db2da49056da5f46b7c2e0"}
Sep 30 17:16:57 crc kubenswrapper[4818]: I0930 17:16:57.922862 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerStarted","Data":"1aecd570f97b7a73436854c4eacb4b41ecf9d4ce5433661bd157b79bfc3368da"}
Sep 30 17:16:58 crc kubenswrapper[4818]: I0930 17:16:58.250335 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-s7x5r"
Sep 30 17:16:58 crc kubenswrapper[4818]: I0930 17:16:58.382334 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmt8q\" (UniqueName: \"kubernetes.io/projected/51ba880e-f6e2-4aff-99ea-268e68bc0b94-kube-api-access-xmt8q\") pod \"51ba880e-f6e2-4aff-99ea-268e68bc0b94\" (UID: \"51ba880e-f6e2-4aff-99ea-268e68bc0b94\") "
Sep 30 17:16:58 crc kubenswrapper[4818]: I0930 17:16:58.391942 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51ba880e-f6e2-4aff-99ea-268e68bc0b94-kube-api-access-xmt8q" (OuterVolumeSpecName: "kube-api-access-xmt8q") pod "51ba880e-f6e2-4aff-99ea-268e68bc0b94" (UID: "51ba880e-f6e2-4aff-99ea-268e68bc0b94"). InnerVolumeSpecName "kube-api-access-xmt8q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:16:58 crc kubenswrapper[4818]: I0930 17:16:58.484136 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmt8q\" (UniqueName: \"kubernetes.io/projected/51ba880e-f6e2-4aff-99ea-268e68bc0b94-kube-api-access-xmt8q\") on node \"crc\" DevicePath \"\""
Sep 30 17:16:58 crc kubenswrapper[4818]: I0930 17:16:58.937420 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-create-s7x5r" event={"ID":"51ba880e-f6e2-4aff-99ea-268e68bc0b94","Type":"ContainerDied","Data":"cf419ad599ccd6a4da989d2d163df56c693762e893db2da49056da5f46b7c2e0"}
Sep 30 17:16:58 crc kubenswrapper[4818]: I0930 17:16:58.937530 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf419ad599ccd6a4da989d2d163df56c693762e893db2da49056da5f46b7c2e0"
Sep 30 17:16:58 crc kubenswrapper[4818]: I0930 17:16:58.937544 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-s7x5r"
Sep 30 17:17:00 crc kubenswrapper[4818]: I0930 17:17:00.961122 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerStarted","Data":"ceb93aff56701f0486d48e44e8f460540d75ddea3ec9a1471bf7ec484ae55787"}
Sep 30 17:17:00 crc kubenswrapper[4818]: I0930 17:17:00.994105 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/prometheus-metric-storage-0" podStartSLOduration=3.791668639 podStartE2EDuration="35.994077921s" podCreationTimestamp="2025-09-30 17:16:25 +0000 UTC" firstStartedPulling="2025-09-30 17:16:28.39512569 +0000 UTC m=+1035.149397496" lastFinishedPulling="2025-09-30 17:17:00.597534962 +0000 UTC m=+1067.351806778" observedRunningTime="2025-09-30 17:17:00.986751803 +0000 UTC m=+1067.741023619" watchObservedRunningTime="2025-09-30 17:17:00.994077921 +0000 UTC m=+1067.748349747"
Sep 30 17:17:02 crc kubenswrapper[4818]: I0930 17:17:02.211648 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:02 crc kubenswrapper[4818]: I0930 17:17:02.813858 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-54465874f9-657tn" podUID="84d9da11-9e97-4595-a374-60d92eeb9737" containerName="console" containerID="cri-o://7f997279a3a2a2fd867b90485a3718728f05465f1dfa1e9cd97cc2895ea4f5f1" gracePeriod=15
Sep 30 17:17:02 crc kubenswrapper[4818]: I0930 17:17:02.979220 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-54465874f9-657tn_84d9da11-9e97-4595-a374-60d92eeb9737/console/0.log"
Sep 30 17:17:02 crc kubenswrapper[4818]: I0930 17:17:02.979535 4818 generic.go:334] "Generic (PLEG): container finished" podID="84d9da11-9e97-4595-a374-60d92eeb9737" containerID="7f997279a3a2a2fd867b90485a3718728f05465f1dfa1e9cd97cc2895ea4f5f1" exitCode=2
Sep 30 17:17:02 crc kubenswrapper[4818]: I0930 17:17:02.980783 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54465874f9-657tn" event={"ID":"84d9da11-9e97-4595-a374-60d92eeb9737","Type":"ContainerDied","Data":"7f997279a3a2a2fd867b90485a3718728f05465f1dfa1e9cd97cc2895ea4f5f1"}
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.261737 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-54465874f9-657tn_84d9da11-9e97-4595-a374-60d92eeb9737/console/0.log"
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.261815 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.367631 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-oauth-serving-cert\") pod \"84d9da11-9e97-4595-a374-60d92eeb9737\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") "
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.367690 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-trusted-ca-bundle\") pod \"84d9da11-9e97-4595-a374-60d92eeb9737\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") "
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.367737 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-oauth-config\") pod \"84d9da11-9e97-4595-a374-60d92eeb9737\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") "
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.367773 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-service-ca\") pod \"84d9da11-9e97-4595-a374-60d92eeb9737\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") "
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.367811 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-serving-cert\") pod \"84d9da11-9e97-4595-a374-60d92eeb9737\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") "
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.367828 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-console-config\") pod \"84d9da11-9e97-4595-a374-60d92eeb9737\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") "
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.367847 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4twgr\" (UniqueName: \"kubernetes.io/projected/84d9da11-9e97-4595-a374-60d92eeb9737-kube-api-access-4twgr\") pod \"84d9da11-9e97-4595-a374-60d92eeb9737\" (UID: \"84d9da11-9e97-4595-a374-60d92eeb9737\") "
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368284 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "84d9da11-9e97-4595-a374-60d92eeb9737" (UID: "84d9da11-9e97-4595-a374-60d92eeb9737"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368306 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "84d9da11-9e97-4595-a374-60d92eeb9737" (UID: "84d9da11-9e97-4595-a374-60d92eeb9737"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368317 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-service-ca" (OuterVolumeSpecName: "service-ca") pod "84d9da11-9e97-4595-a374-60d92eeb9737" (UID: "84d9da11-9e97-4595-a374-60d92eeb9737"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368387 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-console-config" (OuterVolumeSpecName: "console-config") pod "84d9da11-9e97-4595-a374-60d92eeb9737" (UID: "84d9da11-9e97-4595-a374-60d92eeb9737"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368725 4818 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368739 4818 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368748 4818 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-service-ca\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.368757 4818 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/84d9da11-9e97-4595-a374-60d92eeb9737-console-config\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.372911 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84d9da11-9e97-4595-a374-60d92eeb9737-kube-api-access-4twgr" (OuterVolumeSpecName: "kube-api-access-4twgr") pod "84d9da11-9e97-4595-a374-60d92eeb9737" (UID: "84d9da11-9e97-4595-a374-60d92eeb9737"). InnerVolumeSpecName "kube-api-access-4twgr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.373141 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "84d9da11-9e97-4595-a374-60d92eeb9737" (UID: "84d9da11-9e97-4595-a374-60d92eeb9737"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.373265 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "84d9da11-9e97-4595-a374-60d92eeb9737" (UID: "84d9da11-9e97-4595-a374-60d92eeb9737"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.470451 4818 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-oauth-config\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.470483 4818 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/84d9da11-9e97-4595-a374-60d92eeb9737-console-serving-cert\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.470494 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4twgr\" (UniqueName: \"kubernetes.io/projected/84d9da11-9e97-4595-a374-60d92eeb9737-kube-api-access-4twgr\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.988830 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-54465874f9-657tn_84d9da11-9e97-4595-a374-60d92eeb9737/console/0.log"
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.989132 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54465874f9-657tn" event={"ID":"84d9da11-9e97-4595-a374-60d92eeb9737","Type":"ContainerDied","Data":"3e5a0f58e8ffe653990a80b2e67bb9d782270d588a816e8305ff30ca71429c23"}
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.989172 4818 scope.go:117] "RemoveContainer" containerID="7f997279a3a2a2fd867b90485a3718728f05465f1dfa1e9cd97cc2895ea4f5f1"
Sep 30 17:17:03 crc kubenswrapper[4818]: I0930 17:17:03.989323 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-54465874f9-657tn"
Sep 30 17:17:04 crc kubenswrapper[4818]: I0930 17:17:04.033515 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-54465874f9-657tn"]
Sep 30 17:17:04 crc kubenswrapper[4818]: I0930 17:17:04.033556 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-54465874f9-657tn"]
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.062895 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"]
Sep 30 17:17:05 crc kubenswrapper[4818]: E0930 17:17:05.063296 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51ba880e-f6e2-4aff-99ea-268e68bc0b94" containerName="mariadb-database-create"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.063311 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="51ba880e-f6e2-4aff-99ea-268e68bc0b94" containerName="mariadb-database-create"
Sep 30 17:17:05 crc kubenswrapper[4818]: E0930 17:17:05.063336 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d9da11-9e97-4595-a374-60d92eeb9737" containerName="console"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.063345 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d9da11-9e97-4595-a374-60d92eeb9737" containerName="console"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.063526 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="51ba880e-f6e2-4aff-99ea-268e68bc0b94" containerName="mariadb-database-create"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.063554 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="84d9da11-9e97-4595-a374-60d92eeb9737" containerName="console"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.064200 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.066559 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-db-secret"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.075005 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"]
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.196761 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dcpt\" (UniqueName: \"kubernetes.io/projected/8b072c26-0e09-43f2-ad65-698dcdc5cd4b-kube-api-access-9dcpt\") pod \"keystone-ed0b-account-create-vrnmw\" (UID: \"8b072c26-0e09-43f2-ad65-698dcdc5cd4b\") " pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.298848 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dcpt\" (UniqueName: \"kubernetes.io/projected/8b072c26-0e09-43f2-ad65-698dcdc5cd4b-kube-api-access-9dcpt\") pod \"keystone-ed0b-account-create-vrnmw\" (UID: \"8b072c26-0e09-43f2-ad65-698dcdc5cd4b\") " pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.321494 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dcpt\" (UniqueName: \"kubernetes.io/projected/8b072c26-0e09-43f2-ad65-698dcdc5cd4b-kube-api-access-9dcpt\") pod \"keystone-ed0b-account-create-vrnmw\" (UID: \"8b072c26-0e09-43f2-ad65-698dcdc5cd4b\") " pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.381343 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"
Sep 30 17:17:05 crc kubenswrapper[4818]: I0930 17:17:05.641512 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"]
Sep 30 17:17:05 crc kubenswrapper[4818]: W0930 17:17:05.646233 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b072c26_0e09_43f2_ad65_698dcdc5cd4b.slice/crio-d65902aa74038fcfd4d05a09e12563c505188eccdaf47ae7275c875e16e1216c WatchSource:0}: Error finding container d65902aa74038fcfd4d05a09e12563c505188eccdaf47ae7275c875e16e1216c: Status 404 returned error can't find the container with id d65902aa74038fcfd4d05a09e12563c505188eccdaf47ae7275c875e16e1216c
Sep 30 17:17:06 crc kubenswrapper[4818]: I0930 17:17:06.016911 4818 generic.go:334] "Generic (PLEG): container finished" podID="8b072c26-0e09-43f2-ad65-698dcdc5cd4b" containerID="590de73a9cd7bafa85597524569c55276179a60f02b0a802fcd2cd96f92db4f3" exitCode=0
Sep 30 17:17:06 crc kubenswrapper[4818]: I0930 17:17:06.016985 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw" event={"ID":"8b072c26-0e09-43f2-ad65-698dcdc5cd4b","Type":"ContainerDied","Data":"590de73a9cd7bafa85597524569c55276179a60f02b0a802fcd2cd96f92db4f3"}
Sep 30 17:17:06 crc kubenswrapper[4818]: I0930 17:17:06.017306 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw" event={"ID":"8b072c26-0e09-43f2-ad65-698dcdc5cd4b","Type":"ContainerStarted","Data":"d65902aa74038fcfd4d05a09e12563c505188eccdaf47ae7275c875e16e1216c"}
Sep 30 17:17:06 crc kubenswrapper[4818]: I0930 17:17:06.036476 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84d9da11-9e97-4595-a374-60d92eeb9737" path="/var/lib/kubelet/pods/84d9da11-9e97-4595-a374-60d92eeb9737/volumes"
Sep 30 17:17:07 crc kubenswrapper[4818]: I0930 17:17:07.353608 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"
Sep 30 17:17:07 crc kubenswrapper[4818]: I0930 17:17:07.540082 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dcpt\" (UniqueName: \"kubernetes.io/projected/8b072c26-0e09-43f2-ad65-698dcdc5cd4b-kube-api-access-9dcpt\") pod \"8b072c26-0e09-43f2-ad65-698dcdc5cd4b\" (UID: \"8b072c26-0e09-43f2-ad65-698dcdc5cd4b\") "
Sep 30 17:17:07 crc kubenswrapper[4818]: I0930 17:17:07.569143 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b072c26-0e09-43f2-ad65-698dcdc5cd4b-kube-api-access-9dcpt" (OuterVolumeSpecName: "kube-api-access-9dcpt") pod "8b072c26-0e09-43f2-ad65-698dcdc5cd4b" (UID: "8b072c26-0e09-43f2-ad65-698dcdc5cd4b"). InnerVolumeSpecName "kube-api-access-9dcpt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:17:07 crc kubenswrapper[4818]: I0930 17:17:07.642424 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dcpt\" (UniqueName: \"kubernetes.io/projected/8b072c26-0e09-43f2-ad65-698dcdc5cd4b-kube-api-access-9dcpt\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:08 crc kubenswrapper[4818]: I0930 17:17:08.032146 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"
Sep 30 17:17:08 crc kubenswrapper[4818]: I0930 17:17:08.035372 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-ed0b-account-create-vrnmw" event={"ID":"8b072c26-0e09-43f2-ad65-698dcdc5cd4b","Type":"ContainerDied","Data":"d65902aa74038fcfd4d05a09e12563c505188eccdaf47ae7275c875e16e1216c"}
Sep 30 17:17:08 crc kubenswrapper[4818]: I0930 17:17:08.035423 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d65902aa74038fcfd4d05a09e12563c505188eccdaf47ae7275c875e16e1216c"
Sep 30 17:17:10 crc kubenswrapper[4818]: I0930 17:17:10.051458 4818 generic.go:334] "Generic (PLEG): container finished" podID="f707c20f-09e2-4aa7-9a18-5b37f2050e45" containerID="2e142b77678decc1befcd1414b7a0a72a65e60fc1dbbb46829e8e7d0e05a73af" exitCode=0
Sep 30 17:17:10 crc kubenswrapper[4818]: I0930 17:17:10.051539 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-server-0" event={"ID":"f707c20f-09e2-4aa7-9a18-5b37f2050e45","Type":"ContainerDied","Data":"2e142b77678decc1befcd1414b7a0a72a65e60fc1dbbb46829e8e7d0e05a73af"}
Sep 30 17:17:10 crc kubenswrapper[4818]: I0930 17:17:10.060391 4818 generic.go:334] "Generic (PLEG): container finished" podID="68a4cecf-f627-497d-a682-5092ea0b3298" containerID="93ba45143754109a4ad420cd007cbc0eee8def3286d9c62258b0365114cc8400" exitCode=0
Sep 30 17:17:10 crc kubenswrapper[4818]: I0930 17:17:10.060673 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"68a4cecf-f627-497d-a682-5092ea0b3298","Type":"ContainerDied","Data":"93ba45143754109a4ad420cd007cbc0eee8def3286d9c62258b0365114cc8400"}
Sep 30 17:17:11 crc kubenswrapper[4818]: I0930 17:17:11.072697 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-server-0" event={"ID":"f707c20f-09e2-4aa7-9a18-5b37f2050e45","Type":"ContainerStarted","Data":"68125ed2148573df361e8ffb51da5b306e156f3f2c743418cdb46e603c32460c"}
Sep 30 17:17:11 crc kubenswrapper[4818]: I0930 17:17:11.073241 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/rabbitmq-server-0"
Sep 30 17:17:11 crc kubenswrapper[4818]: I0930 17:17:11.075523 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"68a4cecf-f627-497d-a682-5092ea0b3298","Type":"ContainerStarted","Data":"c2b54b01c0a232a0d10594ad16a1ca83739e2bda0ba8a26133dc129a606b95e5"}
Sep 30 17:17:11 crc kubenswrapper[4818]: I0930 17:17:11.075741 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/rabbitmq-notifications-server-0"
Sep 30 17:17:11 crc kubenswrapper[4818]: I0930 17:17:11.099019 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/rabbitmq-server-0" podStartSLOduration=34.519165232 podStartE2EDuration="49.098999771s" podCreationTimestamp="2025-09-30 17:16:22 +0000 UTC" firstStartedPulling="2025-09-30 17:16:24.029427873 +0000 UTC m=+1030.783699689" lastFinishedPulling="2025-09-30 17:16:38.609262382 +0000 UTC m=+1045.363534228" observedRunningTime="2025-09-30 17:17:11.093584575 +0000 UTC m=+1077.847856401" watchObservedRunningTime="2025-09-30 17:17:11.098999771 +0000 UTC m=+1077.853271587"
Sep 30 17:17:11 crc kubenswrapper[4818]: I0930 17:17:11.129416 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" podStartSLOduration=34.937670555 podStartE2EDuration="49.129400233s" podCreationTimestamp="2025-09-30 17:16:22 +0000 UTC" firstStartedPulling="2025-09-30 17:16:24.382497977 +0000 UTC m=+1031.136769793" lastFinishedPulling="2025-09-30 17:16:38.574227655 +0000 UTC m=+1045.328499471" observedRunningTime="2025-09-30 17:17:11.122508346 +0000 UTC m=+1077.876780162" watchObservedRunningTime="2025-09-30 17:17:11.129400233 +0000 UTC m=+1077.883672049"
Sep 30 17:17:12 crc kubenswrapper[4818]: I0930 17:17:12.211503 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:12 crc kubenswrapper[4818]: I0930 17:17:12.214399 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:13 crc kubenswrapper[4818]: I0930 17:17:13.106713 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:15 crc kubenswrapper[4818]: I0930 17:17:15.579633 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"]
Sep 30 17:17:15 crc kubenswrapper[4818]: I0930 17:17:15.581643 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/prometheus-metric-storage-0" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="config-reloader" containerID="cri-o://1aecd570f97b7a73436854c4eacb4b41ecf9d4ce5433661bd157b79bfc3368da" gracePeriod=600
Sep 30 17:17:15 crc kubenswrapper[4818]: I0930 17:17:15.581680 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/prometheus-metric-storage-0" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="thanos-sidecar" containerID="cri-o://ceb93aff56701f0486d48e44e8f460540d75ddea3ec9a1471bf7ec484ae55787" gracePeriod=600
Sep 30 17:17:15 crc kubenswrapper[4818]: I0930 17:17:15.581513 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/prometheus-metric-storage-0" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="prometheus" containerID="cri-o://94a7b16ce4522659fc8a474795297eddd946756a336b8e3fb23808e918ded234" gracePeriod=600
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.124828 4818 generic.go:334] "Generic (PLEG): container finished" podID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerID="ceb93aff56701f0486d48e44e8f460540d75ddea3ec9a1471bf7ec484ae55787" exitCode=0
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.125279 4818 generic.go:334] "Generic (PLEG): container finished" podID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerID="1aecd570f97b7a73436854c4eacb4b41ecf9d4ce5433661bd157b79bfc3368da" exitCode=0
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.125369 4818 generic.go:334] "Generic (PLEG): container finished" podID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerID="94a7b16ce4522659fc8a474795297eddd946756a336b8e3fb23808e918ded234" exitCode=0
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.125011 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerDied","Data":"ceb93aff56701f0486d48e44e8f460540d75ddea3ec9a1471bf7ec484ae55787"}
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.125563 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerDied","Data":"1aecd570f97b7a73436854c4eacb4b41ecf9d4ce5433661bd157b79bfc3368da"}
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.125658 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerDied","Data":"94a7b16ce4522659fc8a474795297eddd946756a336b8e3fb23808e918ded234"}
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.492677 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579611 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579671 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-web-config\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579750 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579776 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5q7m\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-kube-api-access-g5q7m\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579806 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2be08df0-9fca-4f67-b75a-e6b2cd35767d-prometheus-metric-storage-rulefiles-0\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579821 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-tls-assets\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579885 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config-out\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.579941 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-thanos-prometheus-http-client-file\") pod \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\" (UID: \"2be08df0-9fca-4f67-b75a-e6b2cd35767d\") "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.582564 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be08df0-9fca-4f67-b75a-e6b2cd35767d-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.586035 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.586743 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config-out" (OuterVolumeSpecName: "config-out") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.587462 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.589153 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config" (OuterVolumeSpecName: "config") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.597195 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e". PluginName "kubernetes.io/csi", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.601022 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-kube-api-access-g5q7m" (OuterVolumeSpecName: "kube-api-access-g5q7m") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "kube-api-access-g5q7m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.614873 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-web-config" (OuterVolumeSpecName: "web-config") pod "2be08df0-9fca-4f67-b75a-e6b2cd35767d" (UID: "2be08df0-9fca-4f67-b75a-e6b2cd35767d"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.681993 4818 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.682024 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5q7m\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-kube-api-access-g5q7m\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.682035 4818 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2be08df0-9fca-4f67-b75a-e6b2cd35767d-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.682044 4818 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2be08df0-9fca-4f67-b75a-e6b2cd35767d-tls-assets\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.682054 4818 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2be08df0-9fca-4f67-b75a-e6b2cd35767d-config-out\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.682063 4818 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.682100 4818 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") on node \"crc\" "
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.682114 4818 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2be08df0-9fca-4f67-b75a-e6b2cd35767d-web-config\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.698761 4818 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.698981 4818 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e") on node "crc" Sep 30 17:17:16 crc kubenswrapper[4818]: I0930 17:17:16.783816 4818 reconciler_common.go:293] "Volume detached for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") on node \"crc\" DevicePath \"\"" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.134607 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"2be08df0-9fca-4f67-b75a-e6b2cd35767d","Type":"ContainerDied","Data":"fde2d345a9979ec0acba75c9a672285afa9ef5d430865612c92557943fbbb7df"} Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.134884 4818 scope.go:117] "RemoveContainer" containerID="ceb93aff56701f0486d48e44e8f460540d75ddea3ec9a1471bf7ec484ae55787" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.135019 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.162416 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.168094 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.178766 4818 scope.go:117] "RemoveContainer" containerID="1aecd570f97b7a73436854c4eacb4b41ecf9d4ce5433661bd157b79bfc3368da" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.204964 4818 scope.go:117] "RemoveContainer" containerID="94a7b16ce4522659fc8a474795297eddd946756a336b8e3fb23808e918ded234" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.205247 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:17:17 crc kubenswrapper[4818]: E0930 17:17:17.205809 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="thanos-sidecar" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.205842 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="thanos-sidecar" Sep 30 17:17:17 crc kubenswrapper[4818]: E0930 17:17:17.205859 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="prometheus" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.205869 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="prometheus" Sep 30 17:17:17 crc kubenswrapper[4818]: E0930 17:17:17.205881 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="config-reloader" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.205890 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="config-reloader" Sep 30 17:17:17 crc kubenswrapper[4818]: E0930 17:17:17.205905 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" 
containerName="init-config-reloader" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.205913 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="init-config-reloader" Sep 30 17:17:17 crc kubenswrapper[4818]: E0930 17:17:17.205942 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b072c26-0e09-43f2-ad65-698dcdc5cd4b" containerName="mariadb-account-create" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.205952 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b072c26-0e09-43f2-ad65-698dcdc5cd4b" containerName="mariadb-account-create" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.206462 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="config-reloader" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.206487 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="prometheus" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.206504 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" containerName="thanos-sidecar" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.206524 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b072c26-0e09-43f2-ad65-698dcdc5cd4b" containerName="mariadb-account-create" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.208322 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.210125 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-metric-storage-prometheus-svc" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.210137 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.210174 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"metric-storage-prometheus-dockercfg-cfxgj" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.210463 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.210632 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.212701 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-web-config" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.218240 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-tls-assets-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.221238 4818 scope.go:117] "RemoveContainer" containerID="c34297f32613b332296e0dc5de926f272afc6e5a3dfc0ab275d3c79a153763fa" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.231636 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291098 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291173 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291196 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-config\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291216 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291234 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291257 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291383 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d0294316-c783-4cab-98e5-9435e52c6979-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291425 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d0294316-c783-4cab-98e5-9435e52c6979-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291579 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291632 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdfcj\" (UniqueName: \"kubernetes.io/projected/d0294316-c783-4cab-98e5-9435e52c6979-kube-api-access-fdfcj\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.291656 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d0294316-c783-4cab-98e5-9435e52c6979-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393484 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d0294316-c783-4cab-98e5-9435e52c6979-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393580 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393638 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393660 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-config\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393693 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393712 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: 
\"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393737 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393764 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d0294316-c783-4cab-98e5-9435e52c6979-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393782 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d0294316-c783-4cab-98e5-9435e52c6979-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393818 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.393837 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdfcj\" (UniqueName: \"kubernetes.io/projected/d0294316-c783-4cab-98e5-9435e52c6979-kube-api-access-fdfcj\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.395880 4818 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.395919 4818 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9122b16786948abe01c7647c3bcde8c957fd8b046438da381e77282d5c0b955d/globalmount\"" pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.397846 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d0294316-c783-4cab-98e5-9435e52c6979-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.399345 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.399390 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.400541 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d0294316-c783-4cab-98e5-9435e52c6979-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.402315 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d0294316-c783-4cab-98e5-9435e52c6979-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.402560 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.403411 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " 
pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.403514 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-config\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.404161 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0294316-c783-4cab-98e5-9435e52c6979-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.418833 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdfcj\" (UniqueName: \"kubernetes.io/projected/d0294316-c783-4cab-98e5-9435e52c6979-kube-api-access-fdfcj\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.430798 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c7574be3-9de2-4acb-b11c-e5c6b678d74e\") pod \"prometheus-metric-storage-0\" (UID: \"d0294316-c783-4cab-98e5-9435e52c6979\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.526642 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Sep 30 17:17:17 crc kubenswrapper[4818]: I0930 17:17:17.959941 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Sep 30 17:17:17 crc kubenswrapper[4818]: W0930 17:17:17.963768 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0294316_c783_4cab_98e5_9435e52c6979.slice/crio-a6d216b534ac407cb331a610beeb9a08c54fe588fac991477306bd60978586e6 WatchSource:0}: Error finding container a6d216b534ac407cb331a610beeb9a08c54fe588fac991477306bd60978586e6: Status 404 returned error can't find the container with id a6d216b534ac407cb331a610beeb9a08c54fe588fac991477306bd60978586e6 Sep 30 17:17:18 crc kubenswrapper[4818]: I0930 17:17:18.029706 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2be08df0-9fca-4f67-b75a-e6b2cd35767d" path="/var/lib/kubelet/pods/2be08df0-9fca-4f67-b75a-e6b2cd35767d/volumes" Sep 30 17:17:18 crc kubenswrapper[4818]: I0930 17:17:18.143693 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"d0294316-c783-4cab-98e5-9435e52c6979","Type":"ContainerStarted","Data":"a6d216b534ac407cb331a610beeb9a08c54fe588fac991477306bd60978586e6"} Sep 30 17:17:21 crc kubenswrapper[4818]: I0930 17:17:21.175027 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"d0294316-c783-4cab-98e5-9435e52c6979","Type":"ContainerStarted","Data":"8b73348988b76603e9c8054da7616657173993c496516d690b446c0791c62d59"} Sep 30 17:17:22 crc kubenswrapper[4818]: I0930 17:17:22.596025 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:17:22 crc kubenswrapper[4818]: I0930 17:17:22.596432 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:17:23 crc kubenswrapper[4818]: I0930 17:17:23.436222 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/rabbitmq-server-0" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.061901 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-db-sync-6vgps"] Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.062857 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.064745 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-5n2xd" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.065307 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.065546 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.065552 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.081715 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-6vgps"] Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.131196 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.198202 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-combined-ca-bundle\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.198237 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxsbm\" (UniqueName: \"kubernetes.io/projected/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-kube-api-access-cxsbm\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.198282 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-config-data\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.299411 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxsbm\" (UniqueName: \"kubernetes.io/projected/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-kube-api-access-cxsbm\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.299491 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-config-data\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.299656 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-combined-ca-bundle\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " 
pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.305499 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-combined-ca-bundle\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.305778 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-config-data\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.315230 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxsbm\" (UniqueName: \"kubernetes.io/projected/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-kube-api-access-cxsbm\") pod \"keystone-db-sync-6vgps\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.379193 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:24 crc kubenswrapper[4818]: I0930 17:17:24.896370 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-6vgps"] Sep 30 17:17:25 crc kubenswrapper[4818]: I0930 17:17:25.218796 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-6vgps" event={"ID":"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2","Type":"ContainerStarted","Data":"07e0bd5507823d837c8b1936489ad611125892265cd51ac0f42cd2c8fc2fd073"} Sep 30 17:17:28 crc kubenswrapper[4818]: I0930 17:17:28.248832 4818 generic.go:334] "Generic (PLEG): container finished" podID="d0294316-c783-4cab-98e5-9435e52c6979" containerID="8b73348988b76603e9c8054da7616657173993c496516d690b446c0791c62d59" exitCode=0 Sep 30 17:17:28 crc kubenswrapper[4818]: I0930 17:17:28.248990 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"d0294316-c783-4cab-98e5-9435e52c6979","Type":"ContainerDied","Data":"8b73348988b76603e9c8054da7616657173993c496516d690b446c0791c62d59"} Sep 30 17:17:34 crc kubenswrapper[4818]: I0930 17:17:34.305260 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-6vgps" event={"ID":"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2","Type":"ContainerStarted","Data":"b1c8f422c861eb90f8e2fbaa82d47d565c47b417456fbc1f1853d78d50f2ffe8"} Sep 30 17:17:34 crc kubenswrapper[4818]: I0930 17:17:34.308243 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"d0294316-c783-4cab-98e5-9435e52c6979","Type":"ContainerStarted","Data":"d96abc29ed02e6029d2fbdb00feb09c8cff58f10b2ab3b5a79343fa3348f5a9a"} Sep 30 17:17:34 crc kubenswrapper[4818]: I0930 17:17:34.326734 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-db-sync-6vgps" podStartSLOduration=1.477137787 podStartE2EDuration="10.326712524s" podCreationTimestamp="2025-09-30 17:17:24 +0000 UTC" firstStartedPulling="2025-09-30 17:17:24.898774804 +0000 UTC m=+1091.653046620" lastFinishedPulling="2025-09-30 17:17:33.748349541 
+0000 UTC m=+1100.502621357" observedRunningTime="2025-09-30 17:17:34.321078852 +0000 UTC m=+1101.075350728" watchObservedRunningTime="2025-09-30 17:17:34.326712524 +0000 UTC m=+1101.080984380" Sep 30 17:17:37 crc kubenswrapper[4818]: I0930 17:17:37.347692 4818 generic.go:334] "Generic (PLEG): container finished" podID="a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" containerID="b1c8f422c861eb90f8e2fbaa82d47d565c47b417456fbc1f1853d78d50f2ffe8" exitCode=0 Sep 30 17:17:37 crc kubenswrapper[4818]: I0930 17:17:37.347859 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-6vgps" event={"ID":"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2","Type":"ContainerDied","Data":"b1c8f422c861eb90f8e2fbaa82d47d565c47b417456fbc1f1853d78d50f2ffe8"} Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.362797 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"d0294316-c783-4cab-98e5-9435e52c6979","Type":"ContainerStarted","Data":"7d636446dee1e5ad8455a67e99e431c8e749aea935d59ad318c5c7ce25976bed"} Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.363144 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"d0294316-c783-4cab-98e5-9435e52c6979","Type":"ContainerStarted","Data":"db30158303bc6ec04c3570e50524e04e0688afd5a659e011e539a62d9ccc492c"} Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.406964 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/prometheus-metric-storage-0" podStartSLOduration=21.406907754 podStartE2EDuration="21.406907754s" podCreationTimestamp="2025-09-30 17:17:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:17:38.401750245 +0000 UTC m=+1105.156022091" watchObservedRunningTime="2025-09-30 17:17:38.406907754 +0000 UTC m=+1105.161179590" Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.739660 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.862023 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-config-data\") pod \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.862077 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxsbm\" (UniqueName: \"kubernetes.io/projected/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-kube-api-access-cxsbm\") pod \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.862111 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-combined-ca-bundle\") pod \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\" (UID: \"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2\") " Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.871932 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-kube-api-access-cxsbm" (OuterVolumeSpecName: "kube-api-access-cxsbm") pod "a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" (UID: "a440f3f4-3dbb-4e92-96a6-1ed1bde159c2"). InnerVolumeSpecName "kube-api-access-cxsbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.885496 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" (UID: "a440f3f4-3dbb-4e92-96a6-1ed1bde159c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.903452 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-config-data" (OuterVolumeSpecName: "config-data") pod "a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" (UID: "a440f3f4-3dbb-4e92-96a6-1ed1bde159c2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.963479 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.963512 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:17:38 crc kubenswrapper[4818]: I0930 17:17:38.963521 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxsbm\" (UniqueName: \"kubernetes.io/projected/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2-kube-api-access-cxsbm\") on node \"crc\" DevicePath \"\"" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.371093 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-6vgps" event={"ID":"a440f3f4-3dbb-4e92-96a6-1ed1bde159c2","Type":"ContainerDied","Data":"07e0bd5507823d837c8b1936489ad611125892265cd51ac0f42cd2c8fc2fd073"} Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.371135 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07e0bd5507823d837c8b1936489ad611125892265cd51ac0f42cd2c8fc2fd073" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.371179 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-6vgps" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.591747 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-z57r8"] Sep 30 17:17:39 crc kubenswrapper[4818]: E0930 17:17:39.592104 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" containerName="keystone-db-sync" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.592124 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" containerName="keystone-db-sync" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.592333 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" containerName="keystone-db-sync" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.592976 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.597949 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.598029 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.598220 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.598377 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-5n2xd" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.600539 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-z57r8"] Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.673565 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-scripts\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.673629 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-fernet-keys\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.673658 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-config-data\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.673860 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-combined-ca-bundle\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.673955 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh5c8\" (UniqueName: \"kubernetes.io/projected/6c96c9c6-aad0-4fef-ba62-c260b314521d-kube-api-access-bh5c8\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.674001 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-credential-keys\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.699332 4818 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.701771 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.705478 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.714059 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.722764 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.775892 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-config-data\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.775945 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-run-httpd\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.775968 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-config-data\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776010 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psdtg\" (UniqueName: \"kubernetes.io/projected/9ea80203-2016-4a6e-9417-60a37d0ee336-kube-api-access-psdtg\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776034 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh5c8\" (UniqueName: \"kubernetes.io/projected/6c96c9c6-aad0-4fef-ba62-c260b314521d-kube-api-access-bh5c8\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776057 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-credential-keys\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776130 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-scripts\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776154 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-scripts\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776181 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-fernet-keys\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776212 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-log-httpd\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776237 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-combined-ca-bundle\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776268 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.776290 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.779546 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-scripts\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.783889 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-credential-keys\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.789370 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-config-data\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.794133 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-combined-ca-bundle\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.794475 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-fernet-keys\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.796754 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh5c8\" (UniqueName: \"kubernetes.io/projected/6c96c9c6-aad0-4fef-ba62-c260b314521d-kube-api-access-bh5c8\") pod \"keystone-bootstrap-z57r8\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") " pod="watcher-kuttl-default/keystone-bootstrap-z57r8" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878045 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-scripts\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878110 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-log-httpd\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878171 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878199 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878232 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-config-data\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878254 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-run-httpd\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878300 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psdtg\" (UniqueName: \"kubernetes.io/projected/9ea80203-2016-4a6e-9417-60a37d0ee336-kube-api-access-psdtg\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0" 
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.878734 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-log-httpd\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.879392 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-run-httpd\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.881378 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-scripts\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.881720 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-config-data\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.881875 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.886534 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.897752 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psdtg\" (UniqueName: \"kubernetes.io/projected/9ea80203-2016-4a6e-9417-60a37d0ee336-kube-api-access-psdtg\") pod \"ceilometer-0\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:39 crc kubenswrapper[4818]: I0930 17:17:39.914730 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-z57r8"
Sep 30 17:17:40 crc kubenswrapper[4818]: I0930 17:17:40.036012 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:17:40 crc kubenswrapper[4818]: I0930 17:17:40.373407 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-z57r8"]
Sep 30 17:17:40 crc kubenswrapper[4818]: W0930 17:17:40.374033 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c96c9c6_aad0_4fef_ba62_c260b314521d.slice/crio-a690a170ef28a94c74b541b14b204f1ede0aec481693d20e835fc632535b7471 WatchSource:0}: Error finding container a690a170ef28a94c74b541b14b204f1ede0aec481693d20e835fc632535b7471: Status 404 returned error can't find the container with id a690a170ef28a94c74b541b14b204f1ede0aec481693d20e835fc632535b7471
Sep 30 17:17:40 crc kubenswrapper[4818]: I0930 17:17:40.515933 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:17:41 crc kubenswrapper[4818]: I0930 17:17:41.393613 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-z57r8" event={"ID":"6c96c9c6-aad0-4fef-ba62-c260b314521d","Type":"ContainerStarted","Data":"1155add45ca0967bd050f5c337f2fd6c9b1196c444bc5d6f67b8363a03795280"}
Sep 30 17:17:41 crc kubenswrapper[4818]: I0930 17:17:41.393988 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-z57r8" event={"ID":"6c96c9c6-aad0-4fef-ba62-c260b314521d","Type":"ContainerStarted","Data":"a690a170ef28a94c74b541b14b204f1ede0aec481693d20e835fc632535b7471"}
Sep 30 17:17:41 crc kubenswrapper[4818]: I0930 17:17:41.394772 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerStarted","Data":"02a2c9fa7c932a45c1a760833f5be8a354f7b77728ed28952dfd0d1f780907dc"}
Sep 30 17:17:41 crc kubenswrapper[4818]: I0930 17:17:41.410763 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-bootstrap-z57r8" podStartSLOduration=2.410741658 podStartE2EDuration="2.410741658s" podCreationTimestamp="2025-09-30 17:17:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:17:41.410376089 +0000 UTC m=+1108.164647905" watchObservedRunningTime="2025-09-30 17:17:41.410741658 +0000 UTC m=+1108.165013474"
Sep 30 17:17:41 crc kubenswrapper[4818]: I0930 17:17:41.784602 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:17:42 crc kubenswrapper[4818]: I0930 17:17:42.527675 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:44 crc kubenswrapper[4818]: I0930 17:17:44.422576 4818 generic.go:334] "Generic (PLEG): container finished" podID="6c96c9c6-aad0-4fef-ba62-c260b314521d" containerID="1155add45ca0967bd050f5c337f2fd6c9b1196c444bc5d6f67b8363a03795280" exitCode=0
Sep 30 17:17:44 crc kubenswrapper[4818]: I0930 17:17:44.422705 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-z57r8" event={"ID":"6c96c9c6-aad0-4fef-ba62-c260b314521d","Type":"ContainerDied","Data":"1155add45ca0967bd050f5c337f2fd6c9b1196c444bc5d6f67b8363a03795280"}
Sep 30 17:17:45 crc kubenswrapper[4818]: I0930 17:17:45.432549 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerStarted","Data":"260eea95a1f4b3c8c76ee18cc786c0b6fe3cc7a06a5d1a31a4bf35b5098f19db"}
Sep 30 17:17:45 crc kubenswrapper[4818]: I0930 17:17:45.870475 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-z57r8"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.028356 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-scripts\") pod \"6c96c9c6-aad0-4fef-ba62-c260b314521d\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") "
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.028408 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-config-data\") pod \"6c96c9c6-aad0-4fef-ba62-c260b314521d\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") "
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.028457 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bh5c8\" (UniqueName: \"kubernetes.io/projected/6c96c9c6-aad0-4fef-ba62-c260b314521d-kube-api-access-bh5c8\") pod \"6c96c9c6-aad0-4fef-ba62-c260b314521d\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") "
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.028533 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-fernet-keys\") pod \"6c96c9c6-aad0-4fef-ba62-c260b314521d\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") "
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.028608 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-combined-ca-bundle\") pod \"6c96c9c6-aad0-4fef-ba62-c260b314521d\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") "
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.028644 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-credential-keys\") pod \"6c96c9c6-aad0-4fef-ba62-c260b314521d\" (UID: \"6c96c9c6-aad0-4fef-ba62-c260b314521d\") "
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.033719 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-scripts" (OuterVolumeSpecName: "scripts") pod "6c96c9c6-aad0-4fef-ba62-c260b314521d" (UID: "6c96c9c6-aad0-4fef-ba62-c260b314521d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.034035 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6c96c9c6-aad0-4fef-ba62-c260b314521d" (UID: "6c96c9c6-aad0-4fef-ba62-c260b314521d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.040151 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6c96c9c6-aad0-4fef-ba62-c260b314521d" (UID: "6c96c9c6-aad0-4fef-ba62-c260b314521d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.047811 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c96c9c6-aad0-4fef-ba62-c260b314521d-kube-api-access-bh5c8" (OuterVolumeSpecName: "kube-api-access-bh5c8") pod "6c96c9c6-aad0-4fef-ba62-c260b314521d" (UID: "6c96c9c6-aad0-4fef-ba62-c260b314521d"). InnerVolumeSpecName "kube-api-access-bh5c8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.059053 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c96c9c6-aad0-4fef-ba62-c260b314521d" (UID: "6c96c9c6-aad0-4fef-ba62-c260b314521d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.076281 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-config-data" (OuterVolumeSpecName: "config-data") pod "6c96c9c6-aad0-4fef-ba62-c260b314521d" (UID: "6c96c9c6-aad0-4fef-ba62-c260b314521d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.131017 4818 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-fernet-keys\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.131062 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.131077 4818 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-credential-keys\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.131089 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.131099 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c96c9c6-aad0-4fef-ba62-c260b314521d-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.131110 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bh5c8\" (UniqueName: \"kubernetes.io/projected/6c96c9c6-aad0-4fef-ba62-c260b314521d-kube-api-access-bh5c8\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.440978 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-z57r8" event={"ID":"6c96c9c6-aad0-4fef-ba62-c260b314521d","Type":"ContainerDied","Data":"a690a170ef28a94c74b541b14b204f1ede0aec481693d20e835fc632535b7471"}
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.441016 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a690a170ef28a94c74b541b14b204f1ede0aec481693d20e835fc632535b7471"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.441065 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-z57r8"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.518416 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-z57r8"]
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.526024 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-z57r8"]
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.604187 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-hjctc"]
Sep 30 17:17:46 crc kubenswrapper[4818]: E0930 17:17:46.604585 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c96c9c6-aad0-4fef-ba62-c260b314521d" containerName="keystone-bootstrap"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.604608 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c96c9c6-aad0-4fef-ba62-c260b314521d" containerName="keystone-bootstrap"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.604807 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c96c9c6-aad0-4fef-ba62-c260b314521d" containerName="keystone-bootstrap"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.605478 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.607787 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.608102 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.614011 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.614013 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-5n2xd"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.616449 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-hjctc"]
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.739982 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-config-data\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.740049 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-credential-keys\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.740217 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-combined-ca-bundle\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.740257 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-fernet-keys\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.740328 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-scripts\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.740362 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnfkl\" (UniqueName: \"kubernetes.io/projected/b40b79de-4540-4db4-9468-0c9786456c5a-kube-api-access-pnfkl\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.842010 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-combined-ca-bundle\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.842340 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-fernet-keys\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.842411 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-scripts\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.842448 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnfkl\" (UniqueName: \"kubernetes.io/projected/b40b79de-4540-4db4-9468-0c9786456c5a-kube-api-access-pnfkl\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.842494 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-config-data\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.842526 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-credential-keys\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.845494 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-scripts\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.845566 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-config-data\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.846114 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-fernet-keys\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.846993 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-combined-ca-bundle\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.849621 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-credential-keys\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.861408 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnfkl\" (UniqueName: \"kubernetes.io/projected/b40b79de-4540-4db4-9468-0c9786456c5a-kube-api-access-pnfkl\") pod \"keystone-bootstrap-hjctc\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") " pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:46 crc kubenswrapper[4818]: I0930 17:17:46.919128 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:47 crc kubenswrapper[4818]: I0930 17:17:47.397718 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-hjctc"]
Sep 30 17:17:47 crc kubenswrapper[4818]: W0930 17:17:47.399966 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb40b79de_4540_4db4_9468_0c9786456c5a.slice/crio-0cbd43ee9bbf0a795f86faa45e38c6afe7de64139db8844db7c7c3a1010e71a9 WatchSource:0}: Error finding container 0cbd43ee9bbf0a795f86faa45e38c6afe7de64139db8844db7c7c3a1010e71a9: Status 404 returned error can't find the container with id 0cbd43ee9bbf0a795f86faa45e38c6afe7de64139db8844db7c7c3a1010e71a9
Sep 30 17:17:47 crc kubenswrapper[4818]: I0930 17:17:47.458095 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerStarted","Data":"429f4d3b40e2ca1c851f8fde4e8eab556c84bb62df32cfdd91e386eca4089008"}
Sep 30 17:17:47 crc kubenswrapper[4818]: I0930 17:17:47.459641 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-hjctc" event={"ID":"b40b79de-4540-4db4-9468-0c9786456c5a","Type":"ContainerStarted","Data":"0cbd43ee9bbf0a795f86faa45e38c6afe7de64139db8844db7c7c3a1010e71a9"}
Sep 30 17:17:47 crc kubenswrapper[4818]: I0930 17:17:47.527001 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:47 crc kubenswrapper[4818]: I0930 17:17:47.533777 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:48 crc kubenswrapper[4818]: I0930 17:17:48.039792 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c96c9c6-aad0-4fef-ba62-c260b314521d" path="/var/lib/kubelet/pods/6c96c9c6-aad0-4fef-ba62-c260b314521d/volumes"
Sep 30 17:17:48 crc kubenswrapper[4818]: I0930 17:17:48.492860 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-hjctc" event={"ID":"b40b79de-4540-4db4-9468-0c9786456c5a","Type":"ContainerStarted","Data":"f4881e26b2431315c39ff35a5becad71a574c68d86198258c713e669051da2cb"}
Sep 30 17:17:48 crc kubenswrapper[4818]: I0930 17:17:48.497563 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Sep 30 17:17:48 crc kubenswrapper[4818]: I0930 17:17:48.516728 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-bootstrap-hjctc" podStartSLOduration=2.516708805 podStartE2EDuration="2.516708805s" podCreationTimestamp="2025-09-30 17:17:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:17:48.512677266 +0000 UTC m=+1115.266949102" watchObservedRunningTime="2025-09-30 17:17:48.516708805 +0000 UTC m=+1115.270980621"
Sep 30 17:17:51 crc kubenswrapper[4818]: I0930 17:17:51.524259 4818 generic.go:334] "Generic (PLEG): container finished" podID="b40b79de-4540-4db4-9468-0c9786456c5a" containerID="f4881e26b2431315c39ff35a5becad71a574c68d86198258c713e669051da2cb" exitCode=0
Sep 30 17:17:51 crc kubenswrapper[4818]: I0930 17:17:51.524652 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-hjctc" event={"ID":"b40b79de-4540-4db4-9468-0c9786456c5a","Type":"ContainerDied","Data":"f4881e26b2431315c39ff35a5becad71a574c68d86198258c713e669051da2cb"}
Sep 30 17:17:52 crc kubenswrapper[4818]: I0930 17:17:52.595598 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:17:52 crc kubenswrapper[4818]: I0930 17:17:52.595656 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.054417 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.166546 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-combined-ca-bundle\") pod \"b40b79de-4540-4db4-9468-0c9786456c5a\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") "
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.166577 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-config-data\") pod \"b40b79de-4540-4db4-9468-0c9786456c5a\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") "
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.166607 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-scripts\") pod \"b40b79de-4540-4db4-9468-0c9786456c5a\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") "
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.166629 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-fernet-keys\") pod \"b40b79de-4540-4db4-9468-0c9786456c5a\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") "
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.166669 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnfkl\" (UniqueName: \"kubernetes.io/projected/b40b79de-4540-4db4-9468-0c9786456c5a-kube-api-access-pnfkl\") pod \"b40b79de-4540-4db4-9468-0c9786456c5a\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") "
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.166691 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-credential-keys\") pod \"b40b79de-4540-4db4-9468-0c9786456c5a\" (UID: \"b40b79de-4540-4db4-9468-0c9786456c5a\") "
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.170304 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-scripts" (OuterVolumeSpecName: "scripts") pod "b40b79de-4540-4db4-9468-0c9786456c5a" (UID: "b40b79de-4540-4db4-9468-0c9786456c5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.170597 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b40b79de-4540-4db4-9468-0c9786456c5a" (UID: "b40b79de-4540-4db4-9468-0c9786456c5a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.170769 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b40b79de-4540-4db4-9468-0c9786456c5a-kube-api-access-pnfkl" (OuterVolumeSpecName: "kube-api-access-pnfkl") pod "b40b79de-4540-4db4-9468-0c9786456c5a" (UID: "b40b79de-4540-4db4-9468-0c9786456c5a"). InnerVolumeSpecName "kube-api-access-pnfkl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.171018 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b40b79de-4540-4db4-9468-0c9786456c5a" (UID: "b40b79de-4540-4db4-9468-0c9786456c5a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.187039 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-config-data" (OuterVolumeSpecName: "config-data") pod "b40b79de-4540-4db4-9468-0c9786456c5a" (UID: "b40b79de-4540-4db4-9468-0c9786456c5a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.187432 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b40b79de-4540-4db4-9468-0c9786456c5a" (UID: "b40b79de-4540-4db4-9468-0c9786456c5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.268701 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.268744 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.268791 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.268808 4818 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-fernet-keys\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.268824 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnfkl\" (UniqueName: \"kubernetes.io/projected/b40b79de-4540-4db4-9468-0c9786456c5a-kube-api-access-pnfkl\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.268870 4818 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b40b79de-4540-4db4-9468-0c9786456c5a-credential-keys\") on node \"crc\" DevicePath \"\""
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.550014 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerStarted","Data":"26998148bb6a1728f3fd9f43b73d877e7602d95823b3ae076c0dce17b7043989"}
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.552382 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-hjctc" event={"ID":"b40b79de-4540-4db4-9468-0c9786456c5a","Type":"ContainerDied","Data":"0cbd43ee9bbf0a795f86faa45e38c6afe7de64139db8844db7c7c3a1010e71a9"}
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.552479 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cbd43ee9bbf0a795f86faa45e38c6afe7de64139db8844db7c7c3a1010e71a9"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.552567 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-hjctc"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.652743 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"]
Sep 30 17:17:53 crc kubenswrapper[4818]: E0930 17:17:53.653144 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b40b79de-4540-4db4-9468-0c9786456c5a" containerName="keystone-bootstrap"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.653158 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="b40b79de-4540-4db4-9468-0c9786456c5a" containerName="keystone-bootstrap"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.653367 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="b40b79de-4540-4db4-9468-0c9786456c5a" containerName="keystone-bootstrap"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.654041 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.656363 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.657678 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.658170 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-keystone-internal-svc"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.658220 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.658322 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-5n2xd"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.658395 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-keystone-public-svc"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.672960 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"]
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.774951 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-scripts\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.775004 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-credential-keys\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.775024 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-config-data\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.775214 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-public-tls-certs\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.775449 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-internal-tls-certs\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.775492 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sckl5\" (UniqueName: \"kubernetes.io/projected/4c434f52-318f-4151-a9bd-11eb9be54b5a-kube-api-access-sckl5\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.775520 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-fernet-keys\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.775559 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-combined-ca-bundle\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876717 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-public-tls-certs\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876810 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-internal-tls-certs\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876837 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sckl5\" (UniqueName: \"kubernetes.io/projected/4c434f52-318f-4151-a9bd-11eb9be54b5a-kube-api-access-sckl5\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876858 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-fernet-keys\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876881 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-combined-ca-bundle\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876905 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-scripts\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876945 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-credential-keys\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.876963 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-config-data\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.880466 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-scripts\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.880692 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-fernet-keys\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.880849 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-config-data\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.881017 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-combined-ca-bundle\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.881498 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-internal-tls-certs\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.881800 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-credential-keys\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.883377 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-public-tls-certs\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.895301 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sckl5\" (UniqueName: \"kubernetes.io/projected/4c434f52-318f-4151-a9bd-11eb9be54b5a-kube-api-access-sckl5\") pod \"keystone-54f6b9b5cb-zzgw8\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") " pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:53 crc kubenswrapper[4818]: I0930 17:17:53.971962 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:54 crc kubenswrapper[4818]: I0930 17:17:54.457471 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"]
Sep 30 17:17:54 crc kubenswrapper[4818]: W0930 17:17:54.460219 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c434f52_318f_4151_a9bd_11eb9be54b5a.slice/crio-1f180be6f3e98683bc15ca96bcd125b7353c5fc156c71ec8ce3ba105d954a61f WatchSource:0}: Error finding container 1f180be6f3e98683bc15ca96bcd125b7353c5fc156c71ec8ce3ba105d954a61f: Status 404 returned error can't find the container with id 1f180be6f3e98683bc15ca96bcd125b7353c5fc156c71ec8ce3ba105d954a61f
Sep 30 17:17:54 crc kubenswrapper[4818]: I0930 17:17:54.575365 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" event={"ID":"4c434f52-318f-4151-a9bd-11eb9be54b5a","Type":"ContainerStarted","Data":"1f180be6f3e98683bc15ca96bcd125b7353c5fc156c71ec8ce3ba105d954a61f"}
Sep 30 17:17:55 crc kubenswrapper[4818]: I0930 17:17:55.592554 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" event={"ID":"4c434f52-318f-4151-a9bd-11eb9be54b5a","Type":"ContainerStarted","Data":"4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7"}
Sep 30 17:17:55 crc kubenswrapper[4818]: I0930 17:17:55.592895 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:17:55 crc kubenswrapper[4818]: I0930 17:17:55.620845 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" podStartSLOduration=2.6208282819999997 podStartE2EDuration="2.620828282s" podCreationTimestamp="2025-09-30 17:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:17:55.618115829 +0000 UTC m=+1122.372387645" watchObservedRunningTime="2025-09-30 17:17:55.620828282 +0000 UTC m=+1122.375100098"
Sep 30 17:18:04 crc kubenswrapper[4818]: I0930 17:18:04.697225 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerStarted","Data":"b410ca8bb44b63a76d0928ff5673fe8318568a3c8dfc40b0441c1c5de8034f79"}
Sep 30 17:18:04 crc kubenswrapper[4818]: I0930 17:18:04.697669 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:04 crc kubenswrapper[4818]: I0930 17:18:04.697462 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="proxy-httpd" containerID="cri-o://b410ca8bb44b63a76d0928ff5673fe8318568a3c8dfc40b0441c1c5de8034f79" gracePeriod=30
Sep 30 17:18:04 crc kubenswrapper[4818]: I0930 17:18:04.697378 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-central-agent" containerID="cri-o://260eea95a1f4b3c8c76ee18cc786c0b6fe3cc7a06a5d1a31a4bf35b5098f19db" gracePeriod=30
Sep 30 17:18:04 crc kubenswrapper[4818]: I0930 17:18:04.697497 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-notification-agent" containerID="cri-o://429f4d3b40e2ca1c851f8fde4e8eab556c84bb62df32cfdd91e386eca4089008" gracePeriod=30
Sep 30 17:18:04 crc kubenswrapper[4818]: I0930 17:18:04.697556 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="sg-core" containerID="cri-o://26998148bb6a1728f3fd9f43b73d877e7602d95823b3ae076c0dce17b7043989" gracePeriod=30
Sep 30 17:18:04 crc kubenswrapper[4818]: I0930 17:18:04.732256 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.624548319 podStartE2EDuration="25.732237858s" podCreationTimestamp="2025-09-30 17:17:39 +0000 UTC" firstStartedPulling="2025-09-30 17:17:40.523666141 +0000 UTC m=+1107.277937957" lastFinishedPulling="2025-09-30 17:18:03.63135566 +0000 UTC m=+1130.385627496" observedRunningTime="2025-09-30 17:18:04.725154246 +0000 UTC m=+1131.479426082" watchObservedRunningTime="2025-09-30 17:18:04.732237858 +0000 UTC m=+1131.486509694"
Sep 30 17:18:05 crc kubenswrapper[4818]: I0930 17:18:05.710187 4818 generic.go:334] "Generic (PLEG): container finished" podID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerID="b410ca8bb44b63a76d0928ff5673fe8318568a3c8dfc40b0441c1c5de8034f79" exitCode=0
Sep 30 17:18:05 crc kubenswrapper[4818]: I0930 17:18:05.711090 4818 generic.go:334] "Generic (PLEG): container finished" podID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerID="26998148bb6a1728f3fd9f43b73d877e7602d95823b3ae076c0dce17b7043989" exitCode=2
Sep 30 17:18:05 crc kubenswrapper[4818]: I0930 17:18:05.711141 4818 generic.go:334] "Generic (PLEG): container finished" podID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerID="260eea95a1f4b3c8c76ee18cc786c0b6fe3cc7a06a5d1a31a4bf35b5098f19db" exitCode=0
Sep 30 17:18:05 crc kubenswrapper[4818]: I0930 17:18:05.710311 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerDied","Data":"b410ca8bb44b63a76d0928ff5673fe8318568a3c8dfc40b0441c1c5de8034f79"}
Sep 30 17:18:05 crc kubenswrapper[4818]: I0930 17:18:05.711209 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerDied","Data":"26998148bb6a1728f3fd9f43b73d877e7602d95823b3ae076c0dce17b7043989"}
Sep 30 17:18:05 crc kubenswrapper[4818]: I0930 17:18:05.711247 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerDied","Data":"260eea95a1f4b3c8c76ee18cc786c0b6fe3cc7a06a5d1a31a4bf35b5098f19db"}
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.749874 4818 generic.go:334] "Generic (PLEG): container finished" podID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerID="429f4d3b40e2ca1c851f8fde4e8eab556c84bb62df32cfdd91e386eca4089008" exitCode=0
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.749960 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerDied","Data":"429f4d3b40e2ca1c851f8fde4e8eab556c84bb62df32cfdd91e386eca4089008"}
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.830949 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.962972 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-run-httpd\") pod \"9ea80203-2016-4a6e-9417-60a37d0ee336\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") "
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963132 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-log-httpd\") pod \"9ea80203-2016-4a6e-9417-60a37d0ee336\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") "
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963215 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-config-data\") pod \"9ea80203-2016-4a6e-9417-60a37d0ee336\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") "
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963242 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-scripts\") pod \"9ea80203-2016-4a6e-9417-60a37d0ee336\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") "
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963278 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-sg-core-conf-yaml\") pod \"9ea80203-2016-4a6e-9417-60a37d0ee336\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") "
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963331 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psdtg\" (UniqueName: \"kubernetes.io/projected/9ea80203-2016-4a6e-9417-60a37d0ee336-kube-api-access-psdtg\") pod \"9ea80203-2016-4a6e-9417-60a37d0ee336\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") "
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963368 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-combined-ca-bundle\") pod \"9ea80203-2016-4a6e-9417-60a37d0ee336\" (UID: \"9ea80203-2016-4a6e-9417-60a37d0ee336\") "
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963543 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9ea80203-2016-4a6e-9417-60a37d0ee336" (UID: "9ea80203-2016-4a6e-9417-60a37d0ee336"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.963732 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9ea80203-2016-4a6e-9417-60a37d0ee336" (UID: "9ea80203-2016-4a6e-9417-60a37d0ee336"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.964061 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.964084 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ea80203-2016-4a6e-9417-60a37d0ee336-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.968851 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ea80203-2016-4a6e-9417-60a37d0ee336-kube-api-access-psdtg" (OuterVolumeSpecName: "kube-api-access-psdtg") pod "9ea80203-2016-4a6e-9417-60a37d0ee336" (UID: "9ea80203-2016-4a6e-9417-60a37d0ee336"). InnerVolumeSpecName "kube-api-access-psdtg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:18:09 crc kubenswrapper[4818]: I0930 17:18:09.972191 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-scripts" (OuterVolumeSpecName: "scripts") pod "9ea80203-2016-4a6e-9417-60a37d0ee336" (UID: "9ea80203-2016-4a6e-9417-60a37d0ee336"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.001092 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9ea80203-2016-4a6e-9417-60a37d0ee336" (UID: "9ea80203-2016-4a6e-9417-60a37d0ee336"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.048361 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ea80203-2016-4a6e-9417-60a37d0ee336" (UID: "9ea80203-2016-4a6e-9417-60a37d0ee336"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.061819 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-config-data" (OuterVolumeSpecName: "config-data") pod "9ea80203-2016-4a6e-9417-60a37d0ee336" (UID: "9ea80203-2016-4a6e-9417-60a37d0ee336"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.065876 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.065906 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.065915 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.065930 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psdtg\" (UniqueName: \"kubernetes.io/projected/9ea80203-2016-4a6e-9417-60a37d0ee336-kube-api-access-psdtg\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.065939 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea80203-2016-4a6e-9417-60a37d0ee336-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.761792 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9ea80203-2016-4a6e-9417-60a37d0ee336","Type":"ContainerDied","Data":"02a2c9fa7c932a45c1a760833f5be8a354f7b77728ed28952dfd0d1f780907dc"}
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.761865 4818 scope.go:117] "RemoveContainer" containerID="b410ca8bb44b63a76d0928ff5673fe8318568a3c8dfc40b0441c1c5de8034f79"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.761887 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.784521 4818 scope.go:117] "RemoveContainer" containerID="26998148bb6a1728f3fd9f43b73d877e7602d95823b3ae076c0dce17b7043989"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.810501 4818 scope.go:117] "RemoveContainer" containerID="429f4d3b40e2ca1c851f8fde4e8eab556c84bb62df32cfdd91e386eca4089008"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.816556 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.824181 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.838083 4818 scope.go:117] "RemoveContainer" containerID="260eea95a1f4b3c8c76ee18cc786c0b6fe3cc7a06a5d1a31a4bf35b5098f19db"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.841987 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:10 crc kubenswrapper[4818]: E0930 17:18:10.842343 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="proxy-httpd"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842359 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="proxy-httpd"
Sep 30 17:18:10 crc kubenswrapper[4818]: E0930 17:18:10.842397 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="sg-core"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842406 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="sg-core"
Sep 30 17:18:10 crc kubenswrapper[4818]: E0930 17:18:10.842418 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-notification-agent"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842426 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-notification-agent"
Sep 30 17:18:10 crc kubenswrapper[4818]: E0930 17:18:10.842444 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-central-agent"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842451 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-central-agent"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842627 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="proxy-httpd"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842643 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="sg-core"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842661 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-central-agent"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.842679 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" containerName="ceilometer-notification-agent"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.844358 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.847978 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.848224 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.862029 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.990389 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.990661 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h55nx\" (UniqueName: \"kubernetes.io/projected/15a8d085-8f35-4a3a-92e3-45485ada7d6a-kube-api-access-h55nx\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.990738 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-scripts\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.990830 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.990895 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-config-data\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.991302 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-log-httpd\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:10 crc kubenswrapper[4818]: I0930 17:18:10.991399 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-run-httpd\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.092784 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-run-httpd\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.093106 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.093200 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h55nx\" (UniqueName: \"kubernetes.io/projected/15a8d085-8f35-4a3a-92e3-45485ada7d6a-kube-api-access-h55nx\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.093291 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-run-httpd\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.093416 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-scripts\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.093561 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.093675 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-config-data\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.093898 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-log-httpd\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.094216 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-log-httpd\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.099206 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.099497 4818 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-config-data\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.103380 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.103454 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-scripts\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.115644 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h55nx\" (UniqueName: \"kubernetes.io/projected/15a8d085-8f35-4a3a-92e3-45485ada7d6a-kube-api-access-h55nx\") pod \"ceilometer-0\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.162772 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.603131 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:18:11 crc kubenswrapper[4818]: I0930 17:18:11.777700 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerStarted","Data":"efb2049f984e2356ac32f5153d30cb439463130d24ce82b31de28e8be9b80054"} Sep 30 17:18:12 crc kubenswrapper[4818]: I0930 17:18:12.034514 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ea80203-2016-4a6e-9417-60a37d0ee336" path="/var/lib/kubelet/pods/9ea80203-2016-4a6e-9417-60a37d0ee336/volumes" Sep 30 17:18:12 crc kubenswrapper[4818]: I0930 17:18:12.787253 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerStarted","Data":"744337fe2253f99170c847f2f4120e406d3c353cd3cfac6ae9897fcda67f1c90"} Sep 30 17:18:13 crc kubenswrapper[4818]: I0930 17:18:13.795818 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerStarted","Data":"d4d59a5ff26fb6df2f6750b85447a9bfe7ed58e400ebbaf7c9ff9afdf26e47d2"} Sep 30 17:18:13 crc kubenswrapper[4818]: I0930 17:18:13.796102 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerStarted","Data":"b27c5b91a7e8b702446c869453ccdf39680ae777060c05fd711bd577e355853d"} Sep 30 17:18:15 crc kubenswrapper[4818]: I0930 17:18:15.826509 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerStarted","Data":"1628459a183226463d86b0fb7dbb91ac842e89103c81a5c288100277b69822f1"} Sep 30 17:18:15 crc kubenswrapper[4818]: I0930 17:18:15.827220 4818 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:15 crc kubenswrapper[4818]: I0930 17:18:15.856280 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.9716781719999998 podStartE2EDuration="5.856258791s" podCreationTimestamp="2025-09-30 17:18:10 +0000 UTC" firstStartedPulling="2025-09-30 17:18:11.613360287 +0000 UTC m=+1138.367632103" lastFinishedPulling="2025-09-30 17:18:15.497940906 +0000 UTC m=+1142.252212722" observedRunningTime="2025-09-30 17:18:15.856012825 +0000 UTC m=+1142.610284671" watchObservedRunningTime="2025-09-30 17:18:15.856258791 +0000 UTC m=+1142.610530647" Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.596408 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.597015 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.597060 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.597490 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8f6686d61e096db5e2902b7d245395d3a5ea7e0fa983b9dcf9c5710b1f2ecad9"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.597532 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://8f6686d61e096db5e2902b7d245395d3a5ea7e0fa983b9dcf9c5710b1f2ecad9" gracePeriod=600 Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.892501 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="8f6686d61e096db5e2902b7d245395d3a5ea7e0fa983b9dcf9c5710b1f2ecad9" exitCode=0 Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.892543 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"8f6686d61e096db5e2902b7d245395d3a5ea7e0fa983b9dcf9c5710b1f2ecad9"} Sep 30 17:18:22 crc kubenswrapper[4818]: I0930 17:18:22.892572 4818 scope.go:117] "RemoveContainer" containerID="bef732d2824af20a982f56bcc38b49ae15a3f1c74de5e344956d8799c207e863" Sep 30 17:18:23 crc kubenswrapper[4818]: I0930 17:18:23.903475 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" 
event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"ae5094dfd804c3f512a41e1f23be19d77cd5136dc31ac2ab100aaebcb668c7b1"} Sep 30 17:18:25 crc kubenswrapper[4818]: I0930 17:18:25.467115 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.099487 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/openstackclient"] Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.101651 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.105685 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"openstack-config-secret" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.105879 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openstack-config" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.106033 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"openstackclient-openstackclient-dockercfg-dl4pl" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.119118 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstackclient"] Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.176370 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/openstackclient"] Sep 30 17:18:30 crc kubenswrapper[4818]: E0930 17:18:30.176937 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-2v95q openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="watcher-kuttl-default/openstackclient" podUID="9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.184796 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/openstackclient"] Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.207827 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/openstackclient"] Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.209039 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.231980 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v95q\" (UniqueName: \"kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.232062 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.234032 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstackclient"] Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.236435 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config-secret\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.236624 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.337768 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.337846 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9470aa86-6546-4a31-a0d5-6377490de3b1-openstack-config\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.337875 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v95q\" (UniqueName: \"kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.337896 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.337933 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470aa86-6546-4a31-a0d5-6377490de3b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.337955 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9470aa86-6546-4a31-a0d5-6377490de3b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.337982 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config-secret\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.338002 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lk5t\" (UniqueName: \"kubernetes.io/projected/9470aa86-6546-4a31-a0d5-6377490de3b1-kube-api-access-8lk5t\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.339077 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: E0930 17:18:30.339870 4818 projected.go:194] Error preparing data for projected volume kube-api-access-2v95q for pod watcher-kuttl-default/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde) does not match the UID in record. The object might have been deleted and then recreated Sep 30 17:18:30 crc kubenswrapper[4818]: E0930 17:18:30.339912 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q podName:9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde nodeName:}" failed. No retries permitted until 2025-09-30 17:18:30.83989894 +0000 UTC m=+1157.594170756 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-2v95q" (UniqueName: "kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q") pod "openstackclient" (UID: "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde) does not match the UID in record. 
The object might have been deleted and then recreated Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.344423 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.354301 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config-secret\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.439727 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9470aa86-6546-4a31-a0d5-6377490de3b1-openstack-config\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.439794 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470aa86-6546-4a31-a0d5-6377490de3b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.439831 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9470aa86-6546-4a31-a0d5-6377490de3b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.440486 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9470aa86-6546-4a31-a0d5-6377490de3b1-openstack-config\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.439864 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lk5t\" (UniqueName: \"kubernetes.io/projected/9470aa86-6546-4a31-a0d5-6377490de3b1-kube-api-access-8lk5t\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.445628 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470aa86-6546-4a31-a0d5-6377490de3b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.446855 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9470aa86-6546-4a31-a0d5-6377490de3b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.460716 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lk5t\" (UniqueName: \"kubernetes.io/projected/9470aa86-6546-4a31-a0d5-6377490de3b1-kube-api-access-8lk5t\") pod \"openstackclient\" (UID: \"9470aa86-6546-4a31-a0d5-6377490de3b1\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.532145 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.858794 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v95q\" (UniqueName: \"kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q\") pod \"openstackclient\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: E0930 17:18:30.863486 4818 projected.go:194] Error preparing data for projected volume kube-api-access-2v95q for pod watcher-kuttl-default/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde) does not match the UID in record. The object might have been deleted and then recreated Sep 30 17:18:30 crc kubenswrapper[4818]: E0930 17:18:30.863562 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q podName:9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde nodeName:}" failed. No retries permitted until 2025-09-30 17:18:31.863539163 +0000 UTC m=+1158.617810999 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-2v95q" (UniqueName: "kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q") pod "openstackclient" (UID: "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde) does not match the UID in record. The object might have been deleted and then recreated Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.970609 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.976948 4818 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="watcher-kuttl-default/openstackclient" oldPodUID="9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde" podUID="9470aa86-6546-4a31-a0d5-6377490de3b1" Sep 30 17:18:30 crc kubenswrapper[4818]: I0930 17:18:30.979836 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.040583 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstackclient"] Sep 30 17:18:31 crc kubenswrapper[4818]: W0930 17:18:31.049837 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9470aa86_6546_4a31_a0d5_6377490de3b1.slice/crio-a5320cbee7262fad3b581f3b39321abfb2cd4b7ef887f30673557af11df4f939 WatchSource:0}: Error finding container a5320cbee7262fad3b581f3b39321abfb2cd4b7ef887f30673557af11df4f939: Status 404 returned error can't find the container with id a5320cbee7262fad3b581f3b39321abfb2cd4b7ef887f30673557af11df4f939 Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.163011 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config\") pod \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.163155 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config-secret\") pod \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.163438 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-combined-ca-bundle\") pod \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\" (UID: \"9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde\") " Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.163470 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde" (UID: "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.164376 4818 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config\") on node \"crc\" DevicePath \"\"" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.164413 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2v95q\" (UniqueName: \"kubernetes.io/projected/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-kube-api-access-2v95q\") on node \"crc\" DevicePath \"\"" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.168058 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde" (UID: "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.168508 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde" (UID: "9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.265398 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.265430 4818 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.979246 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/openstackclient" Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.980026 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstackclient" event={"ID":"9470aa86-6546-4a31-a0d5-6377490de3b1","Type":"ContainerStarted","Data":"a5320cbee7262fad3b581f3b39321abfb2cd4b7ef887f30673557af11df4f939"} Sep 30 17:18:31 crc kubenswrapper[4818]: I0930 17:18:31.985726 4818 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="watcher-kuttl-default/openstackclient" oldPodUID="9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde" podUID="9470aa86-6546-4a31-a0d5-6377490de3b1" Sep 30 17:18:32 crc kubenswrapper[4818]: I0930 17:18:32.042650 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde" path="/var/lib/kubelet/pods/9e2cff4e-1e3a-4615-8b4f-cf236e0bdbde/volumes" Sep 30 17:18:40 crc kubenswrapper[4818]: I0930 17:18:40.047596 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstackclient" event={"ID":"9470aa86-6546-4a31-a0d5-6377490de3b1","Type":"ContainerStarted","Data":"edb65937c6cfd48abd1e8ef1c400539051a61c4e2f18cf3d746e736c05ec70f4"} Sep 30 17:18:40 crc kubenswrapper[4818]: I0930 17:18:40.065717 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/openstackclient" podStartSLOduration=1.301804079 podStartE2EDuration="10.065698871s" podCreationTimestamp="2025-09-30 17:18:30 +0000 UTC" firstStartedPulling="2025-09-30 17:18:31.05212114 +0000 UTC m=+1157.806392956" lastFinishedPulling="2025-09-30 17:18:39.816015942 +0000 UTC m=+1166.570287748" observedRunningTime="2025-09-30 17:18:40.06417268 +0000 UTC m=+1166.818444506" watchObservedRunningTime="2025-09-30 17:18:40.065698871 +0000 UTC m=+1166.819970707" Sep 30 17:18:41 crc kubenswrapper[4818]: I0930 17:18:41.168475 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:43 crc kubenswrapper[4818]: I0930 17:18:43.449695 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:18:43 crc kubenswrapper[4818]: I0930 17:18:43.450333 4818 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="watcher-kuttl-default/kube-state-metrics-0" podUID="808a26d0-141a-4e9e-8920-933c31423097" containerName="kube-state-metrics" containerID="cri-o://5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea" gracePeriod=30 Sep 30 17:18:43 crc kubenswrapper[4818]: I0930 17:18:43.967158 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.078213 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54vp5\" (UniqueName: \"kubernetes.io/projected/808a26d0-141a-4e9e-8920-933c31423097-kube-api-access-54vp5\") pod \"808a26d0-141a-4e9e-8920-933c31423097\" (UID: \"808a26d0-141a-4e9e-8920-933c31423097\") " Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.078413 4818 generic.go:334] "Generic (PLEG): container finished" podID="808a26d0-141a-4e9e-8920-933c31423097" containerID="5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea" exitCode=2 Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.078448 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"808a26d0-141a-4e9e-8920-933c31423097","Type":"ContainerDied","Data":"5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea"} Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.078472 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"808a26d0-141a-4e9e-8920-933c31423097","Type":"ContainerDied","Data":"b2273c16b344157bbfb2b455f500155861652d4b92fc8cc889564b55a5e07225"} Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.078471 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.078489 4818 scope.go:117] "RemoveContainer" containerID="5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.083636 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/808a26d0-141a-4e9e-8920-933c31423097-kube-api-access-54vp5" (OuterVolumeSpecName: "kube-api-access-54vp5") pod "808a26d0-141a-4e9e-8920-933c31423097" (UID: "808a26d0-141a-4e9e-8920-933c31423097"). InnerVolumeSpecName "kube-api-access-54vp5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.129865 4818 scope.go:117] "RemoveContainer" containerID="5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea" Sep 30 17:18:44 crc kubenswrapper[4818]: E0930 17:18:44.130666 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea\": container with ID starting with 5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea not found: ID does not exist" containerID="5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.130710 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea"} err="failed to get container status \"5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea\": rpc error: code = NotFound desc = could not find container \"5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea\": container with ID starting with 5de37df6358aea0ebc5871a973fa96cc0cb9c6cc58b5ffb760db9cdac7e436ea not found: ID does not exist" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.179773 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54vp5\" (UniqueName: \"kubernetes.io/projected/808a26d0-141a-4e9e-8920-933c31423097-kube-api-access-54vp5\") on node \"crc\" DevicePath \"\"" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.408867 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.414424 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.438276 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:18:44 crc kubenswrapper[4818]: E0930 17:18:44.439016 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="808a26d0-141a-4e9e-8920-933c31423097" containerName="kube-state-metrics" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.439060 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="808a26d0-141a-4e9e-8920-933c31423097" containerName="kube-state-metrics" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.439371 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="808a26d0-141a-4e9e-8920-933c31423097" containerName="kube-state-metrics" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.440430 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.441855 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-kube-state-metrics-svc" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.444725 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"kube-state-metrics-tls-config" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.448047 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.588100 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.590302 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7tz7\" (UniqueName: \"kubernetes.io/projected/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-api-access-b7tz7\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.590681 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.590881 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.658237 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.658466 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-central-agent" containerID="cri-o://744337fe2253f99170c847f2f4120e406d3c353cd3cfac6ae9897fcda67f1c90" gracePeriod=30 Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.658795 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="proxy-httpd" containerID="cri-o://1628459a183226463d86b0fb7dbb91ac842e89103c81a5c288100277b69822f1" gracePeriod=30 Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.658844 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="sg-core" containerID="cri-o://d4d59a5ff26fb6df2f6750b85447a9bfe7ed58e400ebbaf7c9ff9afdf26e47d2" gracePeriod=30 Sep 30 17:18:44 crc 
kubenswrapper[4818]: I0930 17:18:44.658875 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-notification-agent" containerID="cri-o://b27c5b91a7e8b702446c869453ccdf39680ae777060c05fd711bd577e355853d" gracePeriod=30 Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.692433 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.692500 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7tz7\" (UniqueName: \"kubernetes.io/projected/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-api-access-b7tz7\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.692548 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.692585 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.698225 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.705524 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.706134 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.715856 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7tz7\" (UniqueName: \"kubernetes.io/projected/b0f696f5-cad9-4a6b-8f09-2a7f6db599b0-kube-api-access-b7tz7\") pod \"kube-state-metrics-0\" (UID: \"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0\") " pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 
17:18:44 crc kubenswrapper[4818]: I0930 17:18:44.801223 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:45 crc kubenswrapper[4818]: I0930 17:18:45.089023 4818 generic.go:334] "Generic (PLEG): container finished" podID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerID="1628459a183226463d86b0fb7dbb91ac842e89103c81a5c288100277b69822f1" exitCode=0 Sep 30 17:18:45 crc kubenswrapper[4818]: I0930 17:18:45.089314 4818 generic.go:334] "Generic (PLEG): container finished" podID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerID="d4d59a5ff26fb6df2f6750b85447a9bfe7ed58e400ebbaf7c9ff9afdf26e47d2" exitCode=2 Sep 30 17:18:45 crc kubenswrapper[4818]: I0930 17:18:45.089326 4818 generic.go:334] "Generic (PLEG): container finished" podID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerID="744337fe2253f99170c847f2f4120e406d3c353cd3cfac6ae9897fcda67f1c90" exitCode=0 Sep 30 17:18:45 crc kubenswrapper[4818]: I0930 17:18:45.089375 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerDied","Data":"1628459a183226463d86b0fb7dbb91ac842e89103c81a5c288100277b69822f1"} Sep 30 17:18:45 crc kubenswrapper[4818]: I0930 17:18:45.089406 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerDied","Data":"d4d59a5ff26fb6df2f6750b85447a9bfe7ed58e400ebbaf7c9ff9afdf26e47d2"} Sep 30 17:18:45 crc kubenswrapper[4818]: I0930 17:18:45.089420 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerDied","Data":"744337fe2253f99170c847f2f4120e406d3c353cd3cfac6ae9897fcda67f1c90"} Sep 30 17:18:45 crc kubenswrapper[4818]: I0930 17:18:45.303985 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.030634 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="808a26d0-141a-4e9e-8920-933c31423097" path="/var/lib/kubelet/pods/808a26d0-141a-4e9e-8920-933c31423097/volumes" Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.103293 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0","Type":"ContainerStarted","Data":"2ba6595009d1f5397c949b48e5c0e81ee8c68e0d2f2b1cda8e7e2c0e502f65b9"} Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.103355 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"b0f696f5-cad9-4a6b-8f09-2a7f6db599b0","Type":"ContainerStarted","Data":"696b7471a0ab42ee86fade3b8d8dc1702e82ae70b4f83d188453ef72c7242cdf"} Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.104074 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/kube-state-metrics-0" Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.127012 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/kube-state-metrics-0" podStartSLOduration=1.7711700719999999 podStartE2EDuration="2.12698953s" podCreationTimestamp="2025-09-30 17:18:44 +0000 UTC" firstStartedPulling="2025-09-30 17:18:45.313226393 +0000 UTC m=+1172.067498209" lastFinishedPulling="2025-09-30 17:18:45.669045851 
+0000 UTC m=+1172.423317667" observedRunningTime="2025-09-30 17:18:46.121888712 +0000 UTC m=+1172.876160528" watchObservedRunningTime="2025-09-30 17:18:46.12698953 +0000 UTC m=+1172.881261346" Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.727161 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-zxlqp"] Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.728455 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-zxlqp" Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.736750 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-zxlqp"] Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.829428 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxh8w\" (UniqueName: \"kubernetes.io/projected/c64f135c-2d61-43f9-8e93-b4067f1a3103-kube-api-access-lxh8w\") pod \"watcher-db-create-zxlqp\" (UID: \"c64f135c-2d61-43f9-8e93-b4067f1a3103\") " pod="watcher-kuttl-default/watcher-db-create-zxlqp" Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.930540 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxh8w\" (UniqueName: \"kubernetes.io/projected/c64f135c-2d61-43f9-8e93-b4067f1a3103-kube-api-access-lxh8w\") pod \"watcher-db-create-zxlqp\" (UID: \"c64f135c-2d61-43f9-8e93-b4067f1a3103\") " pod="watcher-kuttl-default/watcher-db-create-zxlqp" Sep 30 17:18:46 crc kubenswrapper[4818]: I0930 17:18:46.956762 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxh8w\" (UniqueName: \"kubernetes.io/projected/c64f135c-2d61-43f9-8e93-b4067f1a3103-kube-api-access-lxh8w\") pod \"watcher-db-create-zxlqp\" (UID: \"c64f135c-2d61-43f9-8e93-b4067f1a3103\") " pod="watcher-kuttl-default/watcher-db-create-zxlqp" Sep 30 17:18:47 crc kubenswrapper[4818]: I0930 17:18:47.050290 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-zxlqp" Sep 30 17:18:47 crc kubenswrapper[4818]: I0930 17:18:47.532737 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-zxlqp"] Sep 30 17:18:47 crc kubenswrapper[4818]: W0930 17:18:47.540042 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc64f135c_2d61_43f9_8e93_b4067f1a3103.slice/crio-b55fc9467c8b4adcfea1746a9acc1e96d0b9add2a2f447d75dfc66faab38ee89 WatchSource:0}: Error finding container b55fc9467c8b4adcfea1746a9acc1e96d0b9add2a2f447d75dfc66faab38ee89: Status 404 returned error can't find the container with id b55fc9467c8b4adcfea1746a9acc1e96d0b9add2a2f447d75dfc66faab38ee89 Sep 30 17:18:48 crc kubenswrapper[4818]: I0930 17:18:48.122820 4818 generic.go:334] "Generic (PLEG): container finished" podID="c64f135c-2d61-43f9-8e93-b4067f1a3103" containerID="1796ea9b7e073f5c8595a00dbe95c455d8e2979867dc479c50c0bfc3fc75112d" exitCode=0 Sep 30 17:18:48 crc kubenswrapper[4818]: I0930 17:18:48.122900 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-zxlqp" event={"ID":"c64f135c-2d61-43f9-8e93-b4067f1a3103","Type":"ContainerDied","Data":"1796ea9b7e073f5c8595a00dbe95c455d8e2979867dc479c50c0bfc3fc75112d"} Sep 30 17:18:48 crc kubenswrapper[4818]: I0930 17:18:48.123180 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-zxlqp" event={"ID":"c64f135c-2d61-43f9-8e93-b4067f1a3103","Type":"ContainerStarted","Data":"b55fc9467c8b4adcfea1746a9acc1e96d0b9add2a2f447d75dfc66faab38ee89"} Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.134166 4818 generic.go:334] "Generic (PLEG): container finished" podID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerID="b27c5b91a7e8b702446c869453ccdf39680ae777060c05fd711bd577e355853d" exitCode=0 Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.134270 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerDied","Data":"b27c5b91a7e8b702446c869453ccdf39680ae777060c05fd711bd577e355853d"} Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.267080 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.304172 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-sg-core-conf-yaml\") pod \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.304219 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h55nx\" (UniqueName: \"kubernetes.io/projected/15a8d085-8f35-4a3a-92e3-45485ada7d6a-kube-api-access-h55nx\") pod \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.304250 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-combined-ca-bundle\") pod \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.304274 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-run-httpd\") pod \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.304321 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-scripts\") pod \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.304377 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-log-httpd\") pod \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.304398 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-config-data\") pod \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\" (UID: \"15a8d085-8f35-4a3a-92e3-45485ada7d6a\") " Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.307060 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.308447 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "run-httpd". 
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.308447 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.314442 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15a8d085-8f35-4a3a-92e3-45485ada7d6a-kube-api-access-h55nx" (OuterVolumeSpecName: "kube-api-access-h55nx") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "kube-api-access-h55nx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.327595 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-scripts" (OuterVolumeSpecName: "scripts") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.350948 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.377901 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.407942 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.407994 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.408004 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h55nx\" (UniqueName: \"kubernetes.io/projected/15a8d085-8f35-4a3a-92e3-45485ada7d6a-kube-api-access-h55nx\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.408013 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.408021 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15a8d085-8f35-4a3a-92e3-45485ada7d6a-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.408029 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.428106 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-config-data" (OuterVolumeSpecName: "config-data") pod "15a8d085-8f35-4a3a-92e3-45485ada7d6a" (UID: "15a8d085-8f35-4a3a-92e3-45485ada7d6a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.456173 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-zxlqp"
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.510023 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15a8d085-8f35-4a3a-92e3-45485ada7d6a-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.611016 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxh8w\" (UniqueName: \"kubernetes.io/projected/c64f135c-2d61-43f9-8e93-b4067f1a3103-kube-api-access-lxh8w\") pod \"c64f135c-2d61-43f9-8e93-b4067f1a3103\" (UID: \"c64f135c-2d61-43f9-8e93-b4067f1a3103\") "
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.616074 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c64f135c-2d61-43f9-8e93-b4067f1a3103-kube-api-access-lxh8w" (OuterVolumeSpecName: "kube-api-access-lxh8w") pod "c64f135c-2d61-43f9-8e93-b4067f1a3103" (UID: "c64f135c-2d61-43f9-8e93-b4067f1a3103"). InnerVolumeSpecName "kube-api-access-lxh8w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:18:49 crc kubenswrapper[4818]: I0930 17:18:49.712600 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxh8w\" (UniqueName: \"kubernetes.io/projected/c64f135c-2d61-43f9-8e93-b4067f1a3103-kube-api-access-lxh8w\") on node \"crc\" DevicePath \"\""
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.143251 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"15a8d085-8f35-4a3a-92e3-45485ada7d6a","Type":"ContainerDied","Data":"efb2049f984e2356ac32f5153d30cb439463130d24ce82b31de28e8be9b80054"}
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.143319 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.144128 4818 scope.go:117] "RemoveContainer" containerID="1628459a183226463d86b0fb7dbb91ac842e89103c81a5c288100277b69822f1"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.146359 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-zxlqp" event={"ID":"c64f135c-2d61-43f9-8e93-b4067f1a3103","Type":"ContainerDied","Data":"b55fc9467c8b4adcfea1746a9acc1e96d0b9add2a2f447d75dfc66faab38ee89"}
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.146389 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b55fc9467c8b4adcfea1746a9acc1e96d0b9add2a2f447d75dfc66faab38ee89"
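The teardown of ceilometer-0's volumes above runs through three observable stages per volume: "operationExecutor.UnmountVolume started", then "UnmountVolume.TearDown succeeded", then "Volume detached". A Go sketch that tallies the last stage seen per volume when fed a log like this one on stdin; the three marker strings are copied from the entries above, everything else is hypothetical:

    // teardowntrace.go - a sketch, not kubelet code: tally the last teardown stage
    // observed per volume in a kubelet log read from stdin.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "strings"
    )

    // matches volume "name" with or without the escaped \" form seen above
    var volRe = regexp.MustCompile(`volume \\?"([^"\\]+)\\?"`)

    func main() {
        markers := []struct{ substr, stage string }{
            {"operationExecutor.UnmountVolume started", "unmount started"},
            {"UnmountVolume.TearDown succeeded", "torn down"},
            {"Volume detached", "detached"},
        }
        last := map[string]string{} // volume key -> last stage observed
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 1<<20), 1<<20) // entries here can run very long
        for sc.Scan() {
            for _, mk := range markers {
                if strings.Contains(sc.Text(), mk.substr) {
                    if m := volRe.FindStringSubmatch(sc.Text()); m != nil {
                        // note: TearDown entries name the full inner volume spec
                        // path, so they tally under that longer key
                        last[m[1]] = mk.stage
                    }
                }
            }
        }
        for vol, stage := range last {
            fmt.Printf("%-25s %s\n", vol, stage)
        }
    }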
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.146469 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-zxlqp"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.186828 4818 scope.go:117] "RemoveContainer" containerID="d4d59a5ff26fb6df2f6750b85447a9bfe7ed58e400ebbaf7c9ff9afdf26e47d2"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.187027 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.200494 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.212960 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:50 crc kubenswrapper[4818]: E0930 17:18:50.213334 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-notification-agent"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213351 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-notification-agent"
Sep 30 17:18:50 crc kubenswrapper[4818]: E0930 17:18:50.213364 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-central-agent"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213371 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-central-agent"
Sep 30 17:18:50 crc kubenswrapper[4818]: E0930 17:18:50.213379 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="proxy-httpd"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213387 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="proxy-httpd"
Sep 30 17:18:50 crc kubenswrapper[4818]: E0930 17:18:50.213406 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c64f135c-2d61-43f9-8e93-b4067f1a3103" containerName="mariadb-database-create"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213412 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c64f135c-2d61-43f9-8e93-b4067f1a3103" containerName="mariadb-database-create"
Sep 30 17:18:50 crc kubenswrapper[4818]: E0930 17:18:50.213421 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="sg-core"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213427 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="sg-core"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213574 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="sg-core"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213587 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="proxy-httpd"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213596 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-central-agent"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213610 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c64f135c-2d61-43f9-8e93-b4067f1a3103" containerName="mariadb-database-create"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.213620 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" containerName="ceilometer-notification-agent"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.215557 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.218138 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.218494 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.218651 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221044 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-scripts\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221113 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221155 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221227 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-log-httpd\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221273 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r75fq\" (UniqueName: \"kubernetes.io/projected/1b4f31b1-3648-46e9-b292-251768678021-kube-api-access-r75fq\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221306 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221344 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-run-httpd\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.221364 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-config-data\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.226858 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.241508 4818 scope.go:117] "RemoveContainer" containerID="b27c5b91a7e8b702446c869453ccdf39680ae777060c05fd711bd577e355853d"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.261535 4818 scope.go:117] "RemoveContainer" containerID="744337fe2253f99170c847f2f4120e406d3c353cd3cfac6ae9897fcda67f1c90"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323205 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-scripts\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323299 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323329 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323380 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-log-httpd\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323423 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r75fq\" (UniqueName: \"kubernetes.io/projected/1b4f31b1-3648-46e9-b292-251768678021-kube-api-access-r75fq\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323453 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323478 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-run-httpd\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323951 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-log-httpd\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.323969 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-run-httpd\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.324552 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-config-data\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.333524 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-scripts\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.335175 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-config-data\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.335711 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.336429 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.337329 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.343746 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r75fq\" (UniqueName: \"kubernetes.io/projected/1b4f31b1-3648-46e9-b292-251768678021-kube-api-access-r75fq\") pod \"ceilometer-0\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:50 crc kubenswrapper[4818]: I0930 17:18:50.545718 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:51 crc kubenswrapper[4818]: I0930 17:18:51.022028 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:18:51 crc kubenswrapper[4818]: W0930 17:18:51.037142 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b4f31b1_3648_46e9_b292_251768678021.slice/crio-d2c4fc12d5e8c279534f69d2d497f323564f55abf80c909e7f477dd522e43012 WatchSource:0}: Error finding container d2c4fc12d5e8c279534f69d2d497f323564f55abf80c909e7f477dd522e43012: Status 404 returned error can't find the container with id d2c4fc12d5e8c279534f69d2d497f323564f55abf80c909e7f477dd522e43012
Sep 30 17:18:51 crc kubenswrapper[4818]: I0930 17:18:51.153525 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerStarted","Data":"d2c4fc12d5e8c279534f69d2d497f323564f55abf80c909e7f477dd522e43012"}
Sep 30 17:18:52 crc kubenswrapper[4818]: I0930 17:18:52.030157 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15a8d085-8f35-4a3a-92e3-45485ada7d6a" path="/var/lib/kubelet/pods/15a8d085-8f35-4a3a-92e3-45485ada7d6a/volumes"
Sep 30 17:18:52 crc kubenswrapper[4818]: I0930 17:18:52.162310 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerStarted","Data":"5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255"}
Sep 30 17:18:53 crc kubenswrapper[4818]: I0930 17:18:53.171594 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerStarted","Data":"057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b"}
Sep 30 17:18:53 crc kubenswrapper[4818]: I0930 17:18:53.172131 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerStarted","Data":"a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec"}
Sep 30 17:18:54 crc kubenswrapper[4818]: I0930 17:18:54.812442 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/kube-state-metrics-0"
Sep 30 17:18:55 crc kubenswrapper[4818]: I0930 17:18:55.193138 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerStarted","Data":"f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1"}
Sep 30 17:18:55 crc kubenswrapper[4818]: I0930 17:18:55.193361 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:18:55 crc kubenswrapper[4818]: I0930 17:18:55.216012 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.904845619 podStartE2EDuration="5.21599408s" podCreationTimestamp="2025-09-30 17:18:50 +0000 UTC" firstStartedPulling="2025-09-30 17:18:51.038736177 +0000 UTC m=+1177.793007983" lastFinishedPulling="2025-09-30 17:18:54.349884618 +0000 UTC m=+1181.104156444" observedRunningTime="2025-09-30 17:18:55.215146027 +0000 UTC m=+1181.969417883" watchObservedRunningTime="2025-09-30 17:18:55.21599408 +0000 UTC m=+1181.970265896"
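The pod_startup_latency_tracker entry just above carries enough data to re-derive its own numbers: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that E2E figure minus the image pull window (lastFinishedPulling minus firstStartedPulling, taken here from the monotonic m=+ offsets). A Go sketch with the values copied from that entry:

    // startupslo.go - re-deriving the pod_startup_latency_tracker figures above
    // from the timestamps inside that same log entry.
    package main

    import "fmt"

    func main() {
        const (
            createdSec   = 50.0           // podCreationTimestamp 17:18:50 (wall clock)
            observedSec  = 55.21599408    // watchObservedRunningTime 17:18:55.21599408
            pullStartMon = 1177.793007983 // firstStartedPulling  m=+1177.793007983
            pullEndMon   = 1181.104156444 // lastFinishedPulling  m=+1181.104156444
        )
        e2e := observedSec - createdSec   // 5.21599408s  == podStartE2EDuration
        pull := pullEndMon - pullStartMon // ~3.311148461s spent pulling the image
        slo := e2e - pull                 // 1.904845619s == podStartSLOduration
        fmt.Printf("e2e=%.9fs pull=%.9fs slo=%.9fs\n", e2e, pull, slo)
    }

Using the m=+ readings for the pull window reproduces 1.904845619 exactly, which suggests the tracker subtracts pull time on the monotonic clock.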
Sep 30 17:18:56 crc kubenswrapper[4818]: I0930 17:18:56.727129 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-2436-account-create-bd64m"]
Sep 30 17:18:56 crc kubenswrapper[4818]: I0930 17:18:56.728439 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-2436-account-create-bd64m"
Sep 30 17:18:56 crc kubenswrapper[4818]: I0930 17:18:56.736344 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-2436-account-create-bd64m"]
Sep 30 17:18:56 crc kubenswrapper[4818]: I0930 17:18:56.742365 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret"
Sep 30 17:18:56 crc kubenswrapper[4818]: I0930 17:18:56.822473 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqbcl\" (UniqueName: \"kubernetes.io/projected/9b438313-1d91-4aaf-83f3-f0b56b3c3d62-kube-api-access-cqbcl\") pod \"watcher-2436-account-create-bd64m\" (UID: \"9b438313-1d91-4aaf-83f3-f0b56b3c3d62\") " pod="watcher-kuttl-default/watcher-2436-account-create-bd64m"
Sep 30 17:18:56 crc kubenswrapper[4818]: I0930 17:18:56.923895 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqbcl\" (UniqueName: \"kubernetes.io/projected/9b438313-1d91-4aaf-83f3-f0b56b3c3d62-kube-api-access-cqbcl\") pod \"watcher-2436-account-create-bd64m\" (UID: \"9b438313-1d91-4aaf-83f3-f0b56b3c3d62\") " pod="watcher-kuttl-default/watcher-2436-account-create-bd64m"
Sep 30 17:18:56 crc kubenswrapper[4818]: I0930 17:18:56.958396 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqbcl\" (UniqueName: \"kubernetes.io/projected/9b438313-1d91-4aaf-83f3-f0b56b3c3d62-kube-api-access-cqbcl\") pod \"watcher-2436-account-create-bd64m\" (UID: \"9b438313-1d91-4aaf-83f3-f0b56b3c3d62\") " pod="watcher-kuttl-default/watcher-2436-account-create-bd64m"
Sep 30 17:18:57 crc kubenswrapper[4818]: I0930 17:18:57.059396 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-2436-account-create-bd64m"
Sep 30 17:18:57 crc kubenswrapper[4818]: I0930 17:18:57.532743 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-2436-account-create-bd64m"]
Sep 30 17:18:57 crc kubenswrapper[4818]: W0930 17:18:57.537582 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b438313_1d91_4aaf_83f3_f0b56b3c3d62.slice/crio-db57120a1da88e2b99e055115eb95f2b82ff47847a8259c7ce003c72531104b0 WatchSource:0}: Error finding container db57120a1da88e2b99e055115eb95f2b82ff47847a8259c7ce003c72531104b0: Status 404 returned error can't find the container with id db57120a1da88e2b99e055115eb95f2b82ff47847a8259c7ce003c72531104b0
Sep 30 17:18:58 crc kubenswrapper[4818]: I0930 17:18:58.228266 4818 generic.go:334] "Generic (PLEG): container finished" podID="9b438313-1d91-4aaf-83f3-f0b56b3c3d62" containerID="635c3fc1e2779aa6aa45c7556739ca5ec0d426229819dea5f6d025cdd090485b" exitCode=0
Sep 30 17:18:58 crc kubenswrapper[4818]: I0930 17:18:58.228335 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-2436-account-create-bd64m" event={"ID":"9b438313-1d91-4aaf-83f3-f0b56b3c3d62","Type":"ContainerDied","Data":"635c3fc1e2779aa6aa45c7556739ca5ec0d426229819dea5f6d025cdd090485b"}
Sep 30 17:18:58 crc kubenswrapper[4818]: I0930 17:18:58.228562 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-2436-account-create-bd64m" event={"ID":"9b438313-1d91-4aaf-83f3-f0b56b3c3d62","Type":"ContainerStarted","Data":"db57120a1da88e2b99e055115eb95f2b82ff47847a8259c7ce003c72531104b0"}
Sep 30 17:18:59 crc kubenswrapper[4818]: I0930 17:18:59.725569 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-2436-account-create-bd64m"
Sep 30 17:18:59 crc kubenswrapper[4818]: I0930 17:18:59.882126 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqbcl\" (UniqueName: \"kubernetes.io/projected/9b438313-1d91-4aaf-83f3-f0b56b3c3d62-kube-api-access-cqbcl\") pod \"9b438313-1d91-4aaf-83f3-f0b56b3c3d62\" (UID: \"9b438313-1d91-4aaf-83f3-f0b56b3c3d62\") "
Sep 30 17:18:59 crc kubenswrapper[4818]: I0930 17:18:59.888164 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b438313-1d91-4aaf-83f3-f0b56b3c3d62-kube-api-access-cqbcl" (OuterVolumeSpecName: "kube-api-access-cqbcl") pod "9b438313-1d91-4aaf-83f3-f0b56b3c3d62" (UID: "9b438313-1d91-4aaf-83f3-f0b56b3c3d62"). InnerVolumeSpecName "kube-api-access-cqbcl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:18:59 crc kubenswrapper[4818]: I0930 17:18:59.984508 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqbcl\" (UniqueName: \"kubernetes.io/projected/9b438313-1d91-4aaf-83f3-f0b56b3c3d62-kube-api-access-cqbcl\") on node \"crc\" DevicePath \"\""
Sep 30 17:19:00 crc kubenswrapper[4818]: I0930 17:19:00.250344 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-2436-account-create-bd64m" event={"ID":"9b438313-1d91-4aaf-83f3-f0b56b3c3d62","Type":"ContainerDied","Data":"db57120a1da88e2b99e055115eb95f2b82ff47847a8259c7ce003c72531104b0"}
Sep 30 17:19:00 crc kubenswrapper[4818]: I0930 17:19:00.250396 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db57120a1da88e2b99e055115eb95f2b82ff47847a8259c7ce003c72531104b0"
Sep 30 17:19:00 crc kubenswrapper[4818]: I0930 17:19:00.250394 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-2436-account-create-bd64m"
Sep 30 17:19:01 crc kubenswrapper[4818]: I0930 17:19:01.893776 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"]
Sep 30 17:19:01 crc kubenswrapper[4818]: E0930 17:19:01.894754 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b438313-1d91-4aaf-83f3-f0b56b3c3d62" containerName="mariadb-account-create"
Sep 30 17:19:01 crc kubenswrapper[4818]: I0930 17:19:01.894816 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b438313-1d91-4aaf-83f3-f0b56b3c3d62" containerName="mariadb-account-create"
Sep 30 17:19:01 crc kubenswrapper[4818]: I0930 17:19:01.895066 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b438313-1d91-4aaf-83f3-f0b56b3c3d62" containerName="mariadb-account-create"
Sep 30 17:19:01 crc kubenswrapper[4818]: I0930 17:19:01.895593 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:01 crc kubenswrapper[4818]: I0930 17:19:01.900282 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data"
Sep 30 17:19:01 crc kubenswrapper[4818]: I0930 17:19:01.900560 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-8j8dr"
Sep 30 17:19:01 crc kubenswrapper[4818]: I0930 17:19:01.906461 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"]
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.015074 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-config-data\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.015232 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.015287 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-db-sync-config-data\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.015317 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drvrl\" (UniqueName: \"kubernetes.io/projected/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-kube-api-access-drvrl\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.116751 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-config-data\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.117055 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.117120 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-db-sync-config-data\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.117170 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drvrl\" (UniqueName: \"kubernetes.io/projected/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-kube-api-access-drvrl\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.123323 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-db-sync-config-data\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.129537 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.137318 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-config-data\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.153514 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drvrl\" (UniqueName: \"kubernetes.io/projected/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-kube-api-access-drvrl\") pod \"watcher-kuttl-db-sync-mcx7s\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.211518 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:02 crc kubenswrapper[4818]: I0930 17:19:02.731508 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"]
Sep 30 17:19:03 crc kubenswrapper[4818]: I0930 17:19:03.295487 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s" event={"ID":"023f49c0-bc3c-4d14-bb4f-b6006fd919fc","Type":"ContainerStarted","Data":"9987019838d88d0205d7a9cd4d57b7de3c9fcd95e40dac81d534cb5b78684775"}
Sep 30 17:19:20 crc kubenswrapper[4818]: E0930 17:19:20.233883 4818 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.180:5001/podified-master-centos10/openstack-watcher-api:watcher_latest"
Sep 30 17:19:20 crc kubenswrapper[4818]: E0930 17:19:20.234467 4818 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.180:5001/podified-master-centos10/openstack-watcher-api:watcher_latest"
Sep 30 17:19:20 crc kubenswrapper[4818]: E0930 17:19:20.234802 4818 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-kuttl-db-sync,Image:38.102.83.180:5001/podified-master-centos10/openstack-watcher-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/watcher/watcher.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-drvrl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-kuttl-db-sync-mcx7s_watcher-kuttl-default(023f49c0-bc3c-4d14-bb4f-b6006fd919fc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Sep 30 17:19:20 crc kubenswrapper[4818]: E0930 17:19:20.237348 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-kuttl-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s" podUID="023f49c0-bc3c-4d14-bb4f-b6006fd919fc"
Sep 30 17:19:20 crc kubenswrapper[4818]: E0930 17:19:20.452658 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-kuttl-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.180:5001/podified-master-centos10/openstack-watcher-api:watcher_latest\\\"\"" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s" podUID="023f49c0-bc3c-4d14-bb4f-b6006fd919fc"
Sep 30 17:19:20 crc kubenswrapper[4818]: I0930 17:19:20.559408 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:19:34 crc kubenswrapper[4818]: I0930 17:19:34.026797 4818 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Sep 30 17:19:34 crc kubenswrapper[4818]: I0930 17:19:34.586912 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s" event={"ID":"023f49c0-bc3c-4d14-bb4f-b6006fd919fc","Type":"ContainerStarted","Data":"80d6481dd9818317d403e845eff74a7b63727c657f00be22c0e231a5b6c0c43e"}
Sep 30 17:19:34 crc kubenswrapper[4818]: I0930 17:19:34.601503 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s" podStartSLOduration=2.239455157 podStartE2EDuration="33.601482552s" podCreationTimestamp="2025-09-30 17:19:01 +0000 UTC" firstStartedPulling="2025-09-30 17:19:02.7442024 +0000 UTC m=+1189.498474216" lastFinishedPulling="2025-09-30 17:19:34.106229805 +0000 UTC m=+1220.860501611" observedRunningTime="2025-09-30 17:19:34.60102344 +0000 UTC m=+1221.355295256" watchObservedRunningTime="2025-09-30 17:19:34.601482552 +0000 UTC m=+1221.355754398"
Sep 30 17:19:37 crc kubenswrapper[4818]: I0930 17:19:37.619774 4818 generic.go:334] "Generic (PLEG): container finished" podID="023f49c0-bc3c-4d14-bb4f-b6006fd919fc" containerID="80d6481dd9818317d403e845eff74a7b63727c657f00be22c0e231a5b6c0c43e" exitCode=0
Sep 30 17:19:37 crc kubenswrapper[4818]: I0930 17:19:37.619873 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s" event={"ID":"023f49c0-bc3c-4d14-bb4f-b6006fd919fc","Type":"ContainerDied","Data":"80d6481dd9818317d403e845eff74a7b63727c657f00be22c0e231a5b6c0c43e"}
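The 17:19:20 entries above are kubelet's standard pull-failure path: the CRI pull is cancelled (ErrImagePull), the pod worker records the sync error, and further attempts are throttled as ImagePullBackOff until a retry succeeds, as the ContainerStarted event at 17:19:34 shows. A Go sketch of the doubling back-off schedule behind that throttling; the 10s initial delay and 300s cap are kubelet's commonly cited defaults and are an assumption here, not values read from this log:

    // pullbackoff.go - a sketch of a doubling retry delay like the one behind the
    // ImagePullBackOff entries above (10s initial / 300s cap assumed).
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay, maxDelay := 10*time.Second, 300*time.Second
        for attempt := 1; attempt <= 8; attempt++ {
            fmt.Printf("retry %d after %v\n", attempt, delay)
            if delay *= 2; delay > maxDelay {
                delay = maxDelay // clamp: 10s, 20s, 40s, ... then flat at 5m
            }
        }
    }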
Sep 30 17:19:38 crc kubenswrapper[4818]: I0930 17:19:38.992064 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.066824 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-combined-ca-bundle\") pod \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") "
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.066987 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-db-sync-config-data\") pod \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") "
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.067113 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-config-data\") pod \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") "
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.067182 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drvrl\" (UniqueName: \"kubernetes.io/projected/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-kube-api-access-drvrl\") pod \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\" (UID: \"023f49c0-bc3c-4d14-bb4f-b6006fd919fc\") "
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.072062 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "023f49c0-bc3c-4d14-bb4f-b6006fd919fc" (UID: "023f49c0-bc3c-4d14-bb4f-b6006fd919fc"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.072147 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-kube-api-access-drvrl" (OuterVolumeSpecName: "kube-api-access-drvrl") pod "023f49c0-bc3c-4d14-bb4f-b6006fd919fc" (UID: "023f49c0-bc3c-4d14-bb4f-b6006fd919fc"). InnerVolumeSpecName "kube-api-access-drvrl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.093024 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "023f49c0-bc3c-4d14-bb4f-b6006fd919fc" (UID: "023f49c0-bc3c-4d14-bb4f-b6006fd919fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.111911 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-config-data" (OuterVolumeSpecName: "config-data") pod "023f49c0-bc3c-4d14-bb4f-b6006fd919fc" (UID: "023f49c0-bc3c-4d14-bb4f-b6006fd919fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.169739 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.169773 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.169783 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.169792 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drvrl\" (UniqueName: \"kubernetes.io/projected/023f49c0-bc3c-4d14-bb4f-b6006fd919fc-kube-api-access-drvrl\") on node \"crc\" DevicePath \"\""
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.638886 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s" event={"ID":"023f49c0-bc3c-4d14-bb4f-b6006fd919fc","Type":"ContainerDied","Data":"9987019838d88d0205d7a9cd4d57b7de3c9fcd95e40dac81d534cb5b78684775"}
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.639192 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9987019838d88d0205d7a9cd4d57b7de3c9fcd95e40dac81d534cb5b78684775"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.639255 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.992420 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:19:39 crc kubenswrapper[4818]: E0930 17:19:39.993120 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="023f49c0-bc3c-4d14-bb4f-b6006fd919fc" containerName="watcher-kuttl-db-sync"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.993148 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="023f49c0-bc3c-4d14-bb4f-b6006fd919fc" containerName="watcher-kuttl-db-sync"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.993467 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="023f49c0-bc3c-4d14-bb4f-b6006fd919fc" containerName="watcher-kuttl-db-sync"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.994995 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.996857 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-8j8dr"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.997281 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data"
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.998493 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:19:39 crc kubenswrapper[4818]: I0930 17:19:39.999761 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.002160 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.006848 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.007770 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.015664 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.016358 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.029856 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.037465 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083474 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083525 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083554 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zjgt\" (UniqueName: \"kubernetes.io/projected/95860e29-9750-42bf-b996-2e7513a86e05-kube-api-access-4zjgt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083577 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083591 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083671 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083686 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95860e29-9750-42bf-b996-2e7513a86e05-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083705 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdmx4\" (UniqueName: \"kubernetes.io/projected/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-kube-api-access-rdmx4\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083720 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083737 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083776 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fg89\" (UniqueName: \"kubernetes.io/projected/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-kube-api-access-7fg89\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083796 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.083821 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-logs\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185436 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185584 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185614 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95860e29-9750-42bf-b996-2e7513a86e05-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185670 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdmx4\" (UniqueName: \"kubernetes.io/projected/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-kube-api-access-rdmx4\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185698 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185746 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185794 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fg89\" (UniqueName: \"kubernetes.io/projected/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-kube-api-access-7fg89\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185841 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185896 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-logs\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.185973 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.186005 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.186057 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zjgt\" (UniqueName: \"kubernetes.io/projected/95860e29-9750-42bf-b996-2e7513a86e05-kube-api-access-4zjgt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.186085 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.186103 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.186650 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.187699 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95860e29-9750-42bf-b996-2e7513a86e05-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.188071 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-logs\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.190094 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:40 
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.190462 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.190542 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.190878 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.192546 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.196403 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.206597 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.207898 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.210203 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fg89\" (UniqueName: \"kubernetes.io/projected/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-kube-api-access-7fg89\") pod \"watcher-kuttl-applier-0\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.211776 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zjgt\" (UniqueName: \"kubernetes.io/projected/95860e29-9750-42bf-b996-2e7513a86e05-kube-api-access-4zjgt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.213304 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdmx4\" (UniqueName: \"kubernetes.io/projected/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-kube-api-access-rdmx4\") pod \"watcher-kuttl-api-0\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.320268 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.331827 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.342830 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.851627 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.862184 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:19:40 crc kubenswrapper[4818]: W0930 17:19:40.870444 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95860e29_9750_42bf_b996_2e7513a86e05.slice/crio-cdaf693e04537b38a5b43016d5340e907e56171c0d9ae6f8af7d71576c48041d WatchSource:0}: Error finding container cdaf693e04537b38a5b43016d5340e907e56171c0d9ae6f8af7d71576c48041d: Status 404 returned error can't find the container with id cdaf693e04537b38a5b43016d5340e907e56171c0d9ae6f8af7d71576c48041d
Sep 30 17:19:40 crc kubenswrapper[4818]: I0930 17:19:40.920251 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:19:41 crc kubenswrapper[4818]: I0930 17:19:41.661869 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"95860e29-9750-42bf-b996-2e7513a86e05","Type":"ContainerStarted","Data":"cdaf693e04537b38a5b43016d5340e907e56171c0d9ae6f8af7d71576c48041d"}
Sep 30 17:19:41 crc kubenswrapper[4818]: I0930 17:19:41.662982 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"cbff5ac7-125a-4a10-bf82-38b423cb6e9b","Type":"ContainerStarted","Data":"d3d56fe505ef508e91eb45027878f33e9cb5e2ea2168dfcaf6cd4c41aa59ad4a"}
Sep 30 17:19:41 crc kubenswrapper[4818]: I0930 17:19:41.666214 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3c495d93-4702-44c4-9c67-8e6b4d1d8f16","Type":"ContainerStarted","Data":"3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef"}
Sep 30 17:19:41 crc kubenswrapper[4818]: I0930 17:19:41.666264 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3c495d93-4702-44c4-9c67-8e6b4d1d8f16","Type":"ContainerStarted","Data":"32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01"}
Sep 30 17:19:41 crc kubenswrapper[4818]: I0930 17:19:41.666279 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3c495d93-4702-44c4-9c67-8e6b4d1d8f16","Type":"ContainerStarted","Data":"e38e1bab553735f92617e8de81fe198cf9ccb04f2dd4a6f220cb56ec5ed6e40e"}
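The "SyncLoop (PLEG)" entries come from the Pod Lifecycle Event Generator: the kubelet periodically relists container states from CRI-O and turns state transitions into ContainerStarted/ContainerDied events that feed the sync loop. A rough sketch of that diffing idea, assuming a simple two-snapshot comparison rather than the kubelet's actual implementation:

    package main

    import "fmt"

    // state is a hypothetical container-state snapshot taken at one relist.
    type state map[string]string // containerID -> "running" | "exited"

    // diff emits lifecycle events for transitions between two relists.
    func diff(old, cur state) []string {
        var events []string
        for id, s := range cur {
            switch {
            case old[id] != "running" && s == "running":
                events = append(events, "ContainerStarted "+id)
            case old[id] == "running" && s == "exited":
                events = append(events, "ContainerDied "+id)
            }
        }
        return events
    }

    func main() {
        before := state{}
        after := state{"cdaf693e0453": "running", "d3d56fe505ef": "running"}
        for _, e := range diff(before, after) {
            fmt.Println(e) // consumed by the sync loop as PLEG events
        }
    }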
event={"ID":"3c495d93-4702-44c4-9c67-8e6b4d1d8f16","Type":"ContainerStarted","Data":"e38e1bab553735f92617e8de81fe198cf9ccb04f2dd4a6f220cb56ec5ed6e40e"} Sep 30 17:19:41 crc kubenswrapper[4818]: I0930 17:19:41.666568 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:41 crc kubenswrapper[4818]: I0930 17:19:41.693564 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.693547544 podStartE2EDuration="2.693547544s" podCreationTimestamp="2025-09-30 17:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:19:41.689314559 +0000 UTC m=+1228.443586385" watchObservedRunningTime="2025-09-30 17:19:41.693547544 +0000 UTC m=+1228.447819360" Sep 30 17:19:42 crc kubenswrapper[4818]: I0930 17:19:42.674215 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"cbff5ac7-125a-4a10-bf82-38b423cb6e9b","Type":"ContainerStarted","Data":"4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f"} Sep 30 17:19:42 crc kubenswrapper[4818]: I0930 17:19:42.677507 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"95860e29-9750-42bf-b996-2e7513a86e05","Type":"ContainerStarted","Data":"66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c"} Sep 30 17:19:42 crc kubenswrapper[4818]: I0930 17:19:42.719699 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.765731024 podStartE2EDuration="3.719676189s" podCreationTimestamp="2025-09-30 17:19:39 +0000 UTC" firstStartedPulling="2025-09-30 17:19:40.931378422 +0000 UTC m=+1227.685650238" lastFinishedPulling="2025-09-30 17:19:41.885323587 +0000 UTC m=+1228.639595403" observedRunningTime="2025-09-30 17:19:42.696349329 +0000 UTC m=+1229.450621165" watchObservedRunningTime="2025-09-30 17:19:42.719676189 +0000 UTC m=+1229.473948015" Sep 30 17:19:42 crc kubenswrapper[4818]: I0930 17:19:42.721124 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.73821545 podStartE2EDuration="3.721111628s" podCreationTimestamp="2025-09-30 17:19:39 +0000 UTC" firstStartedPulling="2025-09-30 17:19:40.892727637 +0000 UTC m=+1227.646999443" lastFinishedPulling="2025-09-30 17:19:41.875623805 +0000 UTC m=+1228.629895621" observedRunningTime="2025-09-30 17:19:42.710489611 +0000 UTC m=+1229.464761457" watchObservedRunningTime="2025-09-30 17:19:42.721111628 +0000 UTC m=+1229.475383454" Sep 30 17:19:43 crc kubenswrapper[4818]: I0930 17:19:43.852550 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:45 crc kubenswrapper[4818]: I0930 17:19:45.321234 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:45 crc kubenswrapper[4818]: I0930 17:19:45.343432 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.321443 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.328885 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.332068 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.344029 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.385069 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.385157 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.751674 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.756113 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.777670 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:50 crc kubenswrapper[4818]: I0930 17:19:50.802874 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.002306 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.002834 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-central-agent" containerID="cri-o://5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255" gracePeriod=30 Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.002957 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-notification-agent" containerID="cri-o://a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec" gracePeriod=30 Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.002989 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="sg-core" containerID="cri-o://057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b" gracePeriod=30 Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.003162 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="proxy-httpd" containerID="cri-o://f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1" gracePeriod=30 Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.231429 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"] 
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.240040 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-mcx7s"]
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.315104 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher2436-account-delete-r2x89"]
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.316413 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher2436-account-delete-r2x89"
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.327439 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher2436-account-delete-r2x89"]
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.385468 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.395099 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5xh8\" (UniqueName: \"kubernetes.io/projected/5c76c250-5799-4c50-a044-a125a78f3ff6-kube-api-access-s5xh8\") pod \"watcher2436-account-delete-r2x89\" (UID: \"5c76c250-5799-4c50-a044-a125a78f3ff6\") " pod="watcher-kuttl-default/watcher2436-account-delete-r2x89"
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.444570 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.455944 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.496111 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5xh8\" (UniqueName: \"kubernetes.io/projected/5c76c250-5799-4c50-a044-a125a78f3ff6-kube-api-access-s5xh8\") pod \"watcher2436-account-delete-r2x89\" (UID: \"5c76c250-5799-4c50-a044-a125a78f3ff6\") " pod="watcher-kuttl-default/watcher2436-account-delete-r2x89"
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.543094 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5xh8\" (UniqueName: \"kubernetes.io/projected/5c76c250-5799-4c50-a044-a125a78f3ff6-kube-api-access-s5xh8\") pod \"watcher2436-account-delete-r2x89\" (UID: \"5c76c250-5799-4c50-a044-a125a78f3ff6\") " pod="watcher-kuttl-default/watcher2436-account-delete-r2x89"
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.635477 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher2436-account-delete-r2x89"
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.799781 4818 generic.go:334] "Generic (PLEG): container finished" podID="1b4f31b1-3648-46e9-b292-251768678021" containerID="f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1" exitCode=0
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.800271 4818 generic.go:334] "Generic (PLEG): container finished" podID="1b4f31b1-3648-46e9-b292-251768678021" containerID="057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b" exitCode=2
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.800503 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="cbff5ac7-125a-4a10-bf82-38b423cb6e9b" containerName="watcher-applier" containerID="cri-o://4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" gracePeriod=30
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.800612 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerDied","Data":"f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1"}
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.800657 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerDied","Data":"057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b"}
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.800770 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-kuttl-api-log" containerID="cri-o://32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01" gracePeriod=30
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.801142 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-api" containerID="cri-o://3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef" gracePeriod=30
Sep 30 17:19:52 crc kubenswrapper[4818]: I0930 17:19:52.810300 4818 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" secret="" err="secret \"watcher-watcher-kuttl-dockercfg-8j8dr\" not found"
Sep 30 17:19:52 crc kubenswrapper[4818]: E0930 17:19:52.903172 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found
Sep 30 17:19:52 crc kubenswrapper[4818]: E0930 17:19:52.903279 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data podName:95860e29-9750-42bf-b996-2e7513a86e05 nodeName:}" failed. No retries permitted until 2025-09-30 17:19:53.403251176 +0000 UTC m=+1240.157522992 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "95860e29-9750-42bf-b996-2e7513a86e05") : secret "watcher-kuttl-decision-engine-config-data" not found
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.122072 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher2436-account-delete-r2x89"]
Sep 30 17:19:53 crc kubenswrapper[4818]: E0930 17:19:53.411619 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found
Sep 30 17:19:53 crc kubenswrapper[4818]: E0930 17:19:53.412339 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data podName:95860e29-9750-42bf-b996-2e7513a86e05 nodeName:}" failed. No retries permitted until 2025-09-30 17:19:54.412308115 +0000 UTC m=+1241.166579931 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "95860e29-9750-42bf-b996-2e7513a86e05") : secret "watcher-kuttl-decision-engine-config-data" not found
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.811329 4818 generic.go:334] "Generic (PLEG): container finished" podID="5c76c250-5799-4c50-a044-a125a78f3ff6" containerID="840a104e3b6b271ef2c90646821d246affcefe3f212282fabb4b9073d8040c0a" exitCode=0
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.811416 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher2436-account-delete-r2x89" event={"ID":"5c76c250-5799-4c50-a044-a125a78f3ff6","Type":"ContainerDied","Data":"840a104e3b6b271ef2c90646821d246affcefe3f212282fabb4b9073d8040c0a"}
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.811446 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher2436-account-delete-r2x89" event={"ID":"5c76c250-5799-4c50-a044-a125a78f3ff6","Type":"ContainerStarted","Data":"21e6f54dc6469a437342aa9c37bbc6f184bcfe01335d1378306c4f128029f4fc"}
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.815030 4818 generic.go:334] "Generic (PLEG): container finished" podID="1b4f31b1-3648-46e9-b292-251768678021" containerID="5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255" exitCode=0
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.815109 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerDied","Data":"5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255"}
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.817472 4818 generic.go:334] "Generic (PLEG): container finished" podID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerID="32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01" exitCode=143
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.817540 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3c495d93-4702-44c4-9c67-8e6b4d1d8f16","Type":"ContainerDied","Data":"32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01"}
Sep 30 17:19:53 crc kubenswrapper[4818]: I0930 17:19:53.817662 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="95860e29-9750-42bf-b996-2e7513a86e05" containerName="watcher-decision-engine" containerID="cri-o://66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c" gracePeriod=30
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.040994 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="023f49c0-bc3c-4d14-bb4f-b6006fd919fc" path="/var/lib/kubelet/pods/023f49c0-bc3c-4d14-bb4f-b6006fd919fc/volumes"
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.412237 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:19:54 crc kubenswrapper[4818]: E0930 17:19:54.445472 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found
Sep 30 17:19:54 crc kubenswrapper[4818]: E0930 17:19:54.445577 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data podName:95860e29-9750-42bf-b996-2e7513a86e05 nodeName:}" failed. No retries permitted until 2025-09-30 17:19:56.445553695 +0000 UTC m=+1243.199825511 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "95860e29-9750-42bf-b996-2e7513a86e05") : secret "watcher-kuttl-decision-engine-config-data" not found
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.546863 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-custom-prometheus-ca\") pod \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") "
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.547014 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-config-data\") pod \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") "
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.547060 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-combined-ca-bundle\") pod \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") "
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.547127 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdmx4\" (UniqueName: \"kubernetes.io/projected/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-kube-api-access-rdmx4\") pod \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") "
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.547240 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-logs\") pod \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\" (UID: \"3c495d93-4702-44c4-9c67-8e6b4d1d8f16\") "
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.547817 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-logs" (OuterVolumeSpecName: "logs") pod "3c495d93-4702-44c4-9c67-8e6b4d1d8f16" (UID: "3c495d93-4702-44c4-9c67-8e6b4d1d8f16"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
"kubernetes.io/empty-dir/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-logs" (OuterVolumeSpecName: "logs") pod "3c495d93-4702-44c4-9c67-8e6b4d1d8f16" (UID: "3c495d93-4702-44c4-9c67-8e6b4d1d8f16"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.565230 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-kube-api-access-rdmx4" (OuterVolumeSpecName: "kube-api-access-rdmx4") pod "3c495d93-4702-44c4-9c67-8e6b4d1d8f16" (UID: "3c495d93-4702-44c4-9c67-8e6b4d1d8f16"). InnerVolumeSpecName "kube-api-access-rdmx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.582227 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c495d93-4702-44c4-9c67-8e6b4d1d8f16" (UID: "3c495d93-4702-44c4-9c67-8e6b4d1d8f16"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.582274 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "3c495d93-4702-44c4-9c67-8e6b4d1d8f16" (UID: "3c495d93-4702-44c4-9c67-8e6b4d1d8f16"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.599980 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-config-data" (OuterVolumeSpecName: "config-data") pod "3c495d93-4702-44c4-9c67-8e6b4d1d8f16" (UID: "3c495d93-4702-44c4-9c67-8e6b4d1d8f16"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.648951 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdmx4\" (UniqueName: \"kubernetes.io/projected/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-kube-api-access-rdmx4\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.648983 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.648997 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.649007 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.649021 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c495d93-4702-44c4-9c67-8e6b4d1d8f16-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.834268 4818 generic.go:334] "Generic (PLEG): container finished" podID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerID="3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef" exitCode=0 Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.834343 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3c495d93-4702-44c4-9c67-8e6b4d1d8f16","Type":"ContainerDied","Data":"3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef"} Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.834734 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3c495d93-4702-44c4-9c67-8e6b4d1d8f16","Type":"ContainerDied","Data":"e38e1bab553735f92617e8de81fe198cf9ccb04f2dd4a6f220cb56ec5ed6e40e"} Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.834807 4818 scope.go:117] "RemoveContainer" containerID="3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef" Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.834422 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.872993 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.890013 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.894803 4818 scope.go:117] "RemoveContainer" containerID="32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01"
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.926237 4818 scope.go:117] "RemoveContainer" containerID="3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef"
Sep 30 17:19:54 crc kubenswrapper[4818]: E0930 17:19:54.926788 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef\": container with ID starting with 3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef not found: ID does not exist" containerID="3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef"
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.926842 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef"} err="failed to get container status \"3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef\": rpc error: code = NotFound desc = could not find container \"3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef\": container with ID starting with 3e6077b77e96e17c4f1d2a40ef36f72032f836914a3bc0d5f693b8aa7ae798ef not found: ID does not exist"
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.926876 4818 scope.go:117] "RemoveContainer" containerID="32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01"
Sep 30 17:19:54 crc kubenswrapper[4818]: E0930 17:19:54.927325 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01\": container with ID starting with 32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01 not found: ID does not exist" containerID="32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01"
Sep 30 17:19:54 crc kubenswrapper[4818]: I0930 17:19:54.927364 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01"} err="failed to get container status \"32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01\": rpc error: code = NotFound desc = could not find container \"32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01\": container with ID starting with 32b283f057f4a915930f901d68217514f3e29f4985e56ad6c2973df2c5d1ac01 not found: ID does not exist"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.212745 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher2436-account-delete-r2x89"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.359662 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5xh8\" (UniqueName: \"kubernetes.io/projected/5c76c250-5799-4c50-a044-a125a78f3ff6-kube-api-access-s5xh8\") pod \"5c76c250-5799-4c50-a044-a125a78f3ff6\" (UID: \"5c76c250-5799-4c50-a044-a125a78f3ff6\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: E0930 17:19:55.366104 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.366995 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c76c250-5799-4c50-a044-a125a78f3ff6-kube-api-access-s5xh8" (OuterVolumeSpecName: "kube-api-access-s5xh8") pod "5c76c250-5799-4c50-a044-a125a78f3ff6" (UID: "5c76c250-5799-4c50-a044-a125a78f3ff6"). InnerVolumeSpecName "kube-api-access-s5xh8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:19:55 crc kubenswrapper[4818]: E0930 17:19:55.367763 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Sep 30 17:19:55 crc kubenswrapper[4818]: E0930 17:19:55.369342 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Sep 30 17:19:55 crc kubenswrapper[4818]: E0930 17:19:55.369380 4818 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="cbff5ac7-125a-4a10-bf82-38b423cb6e9b" containerName="watcher-applier"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.461661 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5xh8\" (UniqueName: \"kubernetes.io/projected/5c76c250-5799-4c50-a044-a125a78f3ff6-kube-api-access-s5xh8\") on node \"crc\" DevicePath \"\""
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.724576 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.843726 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher2436-account-delete-r2x89" event={"ID":"5c76c250-5799-4c50-a044-a125a78f3ff6","Type":"ContainerDied","Data":"21e6f54dc6469a437342aa9c37bbc6f184bcfe01335d1378306c4f128029f4fc"}
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.843763 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21e6f54dc6469a437342aa9c37bbc6f184bcfe01335d1378306c4f128029f4fc"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.843818 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher2436-account-delete-r2x89"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.848243 4818 generic.go:334] "Generic (PLEG): container finished" podID="1b4f31b1-3648-46e9-b292-251768678021" containerID="a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec" exitCode=0
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.848307 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.848316 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerDied","Data":"a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec"}
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.848343 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1b4f31b1-3648-46e9-b292-251768678021","Type":"ContainerDied","Data":"d2c4fc12d5e8c279534f69d2d497f323564f55abf80c909e7f477dd522e43012"}
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.848360 4818 scope.go:117] "RemoveContainer" containerID="f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867520 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-ceilometer-tls-certs\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867603 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-scripts\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867637 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-config-data\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867671 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-log-httpd\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867689 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-combined-ca-bundle\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867710 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-run-httpd\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867759 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-sg-core-conf-yaml\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.867797 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r75fq\" (UniqueName: \"kubernetes.io/projected/1b4f31b1-3648-46e9-b292-251768678021-kube-api-access-r75fq\") pod \"1b4f31b1-3648-46e9-b292-251768678021\" (UID: \"1b4f31b1-3648-46e9-b292-251768678021\") "
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.869276 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.870212 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.871621 4818 scope.go:117] "RemoveContainer" containerID="057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b"
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.877070 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-scripts" (OuterVolumeSpecName: "scripts") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.905215 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b4f31b1-3648-46e9-b292-251768678021-kube-api-access-r75fq" (OuterVolumeSpecName: "kube-api-access-r75fq") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "kube-api-access-r75fq". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.951223 4818 scope.go:117] "RemoveContainer" containerID="a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.951483 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.965113 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.969073 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.969097 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b4f31b1-3648-46e9-b292-251768678021-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.969105 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.969115 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r75fq\" (UniqueName: \"kubernetes.io/projected/1b4f31b1-3648-46e9-b292-251768678021-kube-api-access-r75fq\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.969123 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.969132 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.976724 4818 scope.go:117] "RemoveContainer" containerID="5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255" Sep 30 17:19:55 crc kubenswrapper[4818]: I0930 17:19:55.984680 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.000130 4818 scope.go:117] "RemoveContainer" containerID="f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.000475 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1\": container with ID starting with f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1 not found: ID does not exist" containerID="f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.000507 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1"} err="failed to get container status \"f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1\": rpc error: code = NotFound desc = could not find container \"f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1\": container with ID starting with f3ebce8ed96198fbad4ae76acb035f47cbb4cf36328c6aa6f1e9ffc667e7eee1 not found: ID does not exist" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.000537 4818 scope.go:117] "RemoveContainer" containerID="057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.000777 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b\": container with ID starting with 057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b not found: ID does not exist" containerID="057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.000839 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b"} err="failed to get container status \"057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b\": rpc error: code = NotFound desc = could not find container \"057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b\": container with ID starting with 057b6d1c953e8065c04be635fa752fd2f6d97146b6fa87cdfa8de49609c7a03b not found: ID does not exist" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.000866 4818 scope.go:117] "RemoveContainer" containerID="a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.001254 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec\": container with ID starting with a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec not found: ID does not exist" containerID="a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.001276 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec"} err="failed to get container status \"a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec\": rpc error: code = NotFound desc = could not 
find container \"a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec\": container with ID starting with a12ae6a0664fbe2c578cabdabb9214a879b1d1e372c8c99242484cfc962ff9ec not found: ID does not exist" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.001293 4818 scope.go:117] "RemoveContainer" containerID="5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.001509 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255\": container with ID starting with 5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255 not found: ID does not exist" containerID="5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.001533 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255"} err="failed to get container status \"5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255\": rpc error: code = NotFound desc = could not find container \"5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255\": container with ID starting with 5953bd399c584a71aaad3681d5016df4ce7bd399b9e009c813aa327236b56255 not found: ID does not exist" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.011667 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-config-data" (OuterVolumeSpecName: "config-data") pod "1b4f31b1-3648-46e9-b292-251768678021" (UID: "1b4f31b1-3648-46e9-b292-251768678021"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.029487 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" path="/var/lib/kubelet/pods/3c495d93-4702-44c4-9c67-8e6b4d1d8f16/volumes" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.071085 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.071121 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4f31b1-3648-46e9-b292-251768678021-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.173363 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.187243 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196008 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.196410 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-kuttl-api-log" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196433 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-kuttl-api-log" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.196453 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-api" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196461 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-api" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.196478 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="sg-core" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196486 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="sg-core" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.196496 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c76c250-5799-4c50-a044-a125a78f3ff6" containerName="mariadb-account-delete" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196503 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c76c250-5799-4c50-a044-a125a78f3ff6" containerName="mariadb-account-delete" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.196519 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="proxy-httpd" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196527 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="proxy-httpd" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.196544 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-notification-agent" Sep 30 17:19:56 crc kubenswrapper[4818]: 
I0930 17:19:56.196554 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-notification-agent" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.196568 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-central-agent" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196576 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-central-agent" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196867 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="sg-core" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196915 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-kuttl-api-log" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196943 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="proxy-httpd" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196970 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-central-agent" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.196986 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c495d93-4702-44c4-9c67-8e6b4d1d8f16" containerName="watcher-api" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.197003 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c76c250-5799-4c50-a044-a125a78f3ff6" containerName="mariadb-account-delete" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.197025 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4f31b1-3648-46e9-b292-251768678021" containerName="ceilometer-notification-agent" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.208612 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.208718 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.210155 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.213460 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.220370 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274093 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274147 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr5jh\" (UniqueName: \"kubernetes.io/projected/7ebe0898-feb2-4e12-b4be-efad66862264-kube-api-access-lr5jh\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274174 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-log-httpd\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274217 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274251 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-config-data\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274280 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-run-httpd\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274339 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.274377 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-scripts\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.375910 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-log-httpd\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.376253 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.376292 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-config-data\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.376314 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-run-httpd\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.376386 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.376420 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-scripts\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.376483 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.376512 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr5jh\" (UniqueName: \"kubernetes.io/projected/7ebe0898-feb2-4e12-b4be-efad66862264-kube-api-access-lr5jh\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.377212 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-log-httpd\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.380737 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.382167 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-run-httpd\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.384271 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.385000 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-scripts\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.388657 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-config-data\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.390724 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.399290 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr5jh\" (UniqueName: \"kubernetes.io/projected/7ebe0898-feb2-4e12-b4be-efad66862264-kube-api-access-lr5jh\") pod \"ceilometer-0\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.477740 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:19:56 crc kubenswrapper[4818]: E0930 17:19:56.477834 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data podName:95860e29-9750-42bf-b996-2e7513a86e05 nodeName:}" failed. No retries permitted until 2025-09-30 17:20:00.477810248 +0000 UTC m=+1247.232082064 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "95860e29-9750-42bf-b996-2e7513a86e05") : secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:19:56 crc kubenswrapper[4818]: I0930 17:19:56.551467 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.038286 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.369996 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-zxlqp"] Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.375548 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-zxlqp"] Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.385838 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher2436-account-delete-r2x89"] Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.392799 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-2436-account-create-bd64m"] Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.400936 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher2436-account-delete-r2x89"] Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.407421 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-2436-account-create-bd64m"] Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.868266 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerStarted","Data":"73d7561c0e62697f0ebfd4b2638443d8e7dfa3385b5d5a188f830b1880e7d6f7"} Sep 30 17:19:57 crc kubenswrapper[4818]: I0930 17:19:57.868549 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerStarted","Data":"e59f413a682e1b65c935fa30bbe4dd32113869a69a79c87ba1ee5c47589a37b0"} Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.030397 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b4f31b1-3648-46e9-b292-251768678021" path="/var/lib/kubelet/pods/1b4f31b1-3648-46e9-b292-251768678021/volumes" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.031509 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c76c250-5799-4c50-a044-a125a78f3ff6" path="/var/lib/kubelet/pods/5c76c250-5799-4c50-a044-a125a78f3ff6/volumes" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.032001 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b438313-1d91-4aaf-83f3-f0b56b3c3d62" path="/var/lib/kubelet/pods/9b438313-1d91-4aaf-83f3-f0b56b3c3d62/volumes" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.033053 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c64f135c-2d61-43f9-8e93-b4067f1a3103" path="/var/lib/kubelet/pods/c64f135c-2d61-43f9-8e93-b4067f1a3103/volumes" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.621401 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.716460 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-config-data\") pod \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.717256 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-combined-ca-bundle\") pod \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.717493 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-logs\") pod \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.717610 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fg89\" (UniqueName: \"kubernetes.io/projected/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-kube-api-access-7fg89\") pod \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\" (UID: \"cbff5ac7-125a-4a10-bf82-38b423cb6e9b\") " Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.717882 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-logs" (OuterVolumeSpecName: "logs") pod "cbff5ac7-125a-4a10-bf82-38b423cb6e9b" (UID: "cbff5ac7-125a-4a10-bf82-38b423cb6e9b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.718158 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.734471 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-kube-api-access-7fg89" (OuterVolumeSpecName: "kube-api-access-7fg89") pod "cbff5ac7-125a-4a10-bf82-38b423cb6e9b" (UID: "cbff5ac7-125a-4a10-bf82-38b423cb6e9b"). InnerVolumeSpecName "kube-api-access-7fg89". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.739061 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cbff5ac7-125a-4a10-bf82-38b423cb6e9b" (UID: "cbff5ac7-125a-4a10-bf82-38b423cb6e9b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.762112 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-config-data" (OuterVolumeSpecName: "config-data") pod "cbff5ac7-125a-4a10-bf82-38b423cb6e9b" (UID: "cbff5ac7-125a-4a10-bf82-38b423cb6e9b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.819597 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fg89\" (UniqueName: \"kubernetes.io/projected/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-kube-api-access-7fg89\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.819632 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.819642 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbff5ac7-125a-4a10-bf82-38b423cb6e9b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.877320 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerStarted","Data":"a00f7eea40e81f5402882576871a0e6df8d636b0158eacc93f55cc8bab82fefa"} Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.878812 4818 generic.go:334] "Generic (PLEG): container finished" podID="cbff5ac7-125a-4a10-bf82-38b423cb6e9b" containerID="4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" exitCode=0 Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.878859 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"cbff5ac7-125a-4a10-bf82-38b423cb6e9b","Type":"ContainerDied","Data":"4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f"} Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.878869 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.878889 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"cbff5ac7-125a-4a10-bf82-38b423cb6e9b","Type":"ContainerDied","Data":"d3d56fe505ef508e91eb45027878f33e9cb5e2ea2168dfcaf6cd4c41aa59ad4a"} Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.878912 4818 scope.go:117] "RemoveContainer" containerID="4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.906319 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.915962 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.919490 4818 scope.go:117] "RemoveContainer" containerID="4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" Sep 30 17:19:58 crc kubenswrapper[4818]: E0930 17:19:58.920003 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f\": container with ID starting with 4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f not found: ID does not exist" containerID="4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f" Sep 30 17:19:58 crc kubenswrapper[4818]: I0930 17:19:58.920042 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f"} err="failed to get container status \"4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f\": rpc error: code = NotFound desc = could not find container \"4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f\": container with ID starting with 4e1edfb86c9a1b0e8a11dc73860cae3848b79f606b3cdc17f798efbb09cfb78f not found: ID does not exist" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.791221 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.845154 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data\") pod \"95860e29-9750-42bf-b996-2e7513a86e05\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.845267 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-combined-ca-bundle\") pod \"95860e29-9750-42bf-b996-2e7513a86e05\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.845370 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-custom-prometheus-ca\") pod \"95860e29-9750-42bf-b996-2e7513a86e05\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.845404 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zjgt\" (UniqueName: \"kubernetes.io/projected/95860e29-9750-42bf-b996-2e7513a86e05-kube-api-access-4zjgt\") pod \"95860e29-9750-42bf-b996-2e7513a86e05\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.845484 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95860e29-9750-42bf-b996-2e7513a86e05-logs\") pod \"95860e29-9750-42bf-b996-2e7513a86e05\" (UID: \"95860e29-9750-42bf-b996-2e7513a86e05\") " Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.846153 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95860e29-9750-42bf-b996-2e7513a86e05-logs" (OuterVolumeSpecName: "logs") pod "95860e29-9750-42bf-b996-2e7513a86e05" (UID: "95860e29-9750-42bf-b996-2e7513a86e05"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.860191 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95860e29-9750-42bf-b996-2e7513a86e05-kube-api-access-4zjgt" (OuterVolumeSpecName: "kube-api-access-4zjgt") pod "95860e29-9750-42bf-b996-2e7513a86e05" (UID: "95860e29-9750-42bf-b996-2e7513a86e05"). InnerVolumeSpecName "kube-api-access-4zjgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.894630 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "95860e29-9750-42bf-b996-2e7513a86e05" (UID: "95860e29-9750-42bf-b996-2e7513a86e05"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.906259 4818 generic.go:334] "Generic (PLEG): container finished" podID="95860e29-9750-42bf-b996-2e7513a86e05" containerID="66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c" exitCode=0 Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.906371 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"95860e29-9750-42bf-b996-2e7513a86e05","Type":"ContainerDied","Data":"66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c"} Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.906439 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"95860e29-9750-42bf-b996-2e7513a86e05","Type":"ContainerDied","Data":"cdaf693e04537b38a5b43016d5340e907e56171c0d9ae6f8af7d71576c48041d"} Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.906452 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.906492 4818 scope.go:117] "RemoveContainer" containerID="66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.910396 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "95860e29-9750-42bf-b996-2e7513a86e05" (UID: "95860e29-9750-42bf-b996-2e7513a86e05"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.912506 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerStarted","Data":"2a44ba961fa006fdd17ee3b0a8afe170045f3d05fe030ea957848f0979747f7d"} Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.925652 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data" (OuterVolumeSpecName: "config-data") pod "95860e29-9750-42bf-b996-2e7513a86e05" (UID: "95860e29-9750-42bf-b996-2e7513a86e05"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.943456 4818 scope.go:117] "RemoveContainer" containerID="66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c" Sep 30 17:19:59 crc kubenswrapper[4818]: E0930 17:19:59.947198 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c\": container with ID starting with 66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c not found: ID does not exist" containerID="66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.947230 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c"} err="failed to get container status \"66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c\": rpc error: code = NotFound desc = could not find container \"66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c\": container with ID starting with 66a003e96d7f6a33ad0ea2ee7cc4542273d637f772c0b899b152664e0b04190c not found: ID does not exist" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.949209 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.949249 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.949263 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95860e29-9750-42bf-b996-2e7513a86e05-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.949278 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zjgt\" (UniqueName: \"kubernetes.io/projected/95860e29-9750-42bf-b996-2e7513a86e05-kube-api-access-4zjgt\") on node \"crc\" DevicePath \"\"" Sep 30 17:19:59 crc kubenswrapper[4818]: I0930 17:19:59.949290 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95860e29-9750-42bf-b996-2e7513a86e05-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:00 crc kubenswrapper[4818]: I0930 17:20:00.045497 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbff5ac7-125a-4a10-bf82-38b423cb6e9b" path="/var/lib/kubelet/pods/cbff5ac7-125a-4a10-bf82-38b423cb6e9b/volumes" Sep 30 17:20:00 crc kubenswrapper[4818]: I0930 17:20:00.234304 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:20:00 crc kubenswrapper[4818]: I0930 17:20:00.239682 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:20:00 crc kubenswrapper[4818]: I0930 17:20:00.929871 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerStarted","Data":"54c063863ba6f3ba7892ac6f17e14f5e56e0254f618fececdb087b274c254251"} Sep 30 
17:20:00 crc kubenswrapper[4818]: I0930 17:20:00.932272 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:00 crc kubenswrapper[4818]: I0930 17:20:00.976780 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.415435951 podStartE2EDuration="4.976757075s" podCreationTimestamp="2025-09-30 17:19:56 +0000 UTC" firstStartedPulling="2025-09-30 17:19:57.049185071 +0000 UTC m=+1243.803456887" lastFinishedPulling="2025-09-30 17:20:00.610506195 +0000 UTC m=+1247.364778011" observedRunningTime="2025-09-30 17:20:00.964749741 +0000 UTC m=+1247.719021627" watchObservedRunningTime="2025-09-30 17:20:00.976757075 +0000 UTC m=+1247.731028891" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.031617 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95860e29-9750-42bf-b996-2e7513a86e05" path="/var/lib/kubelet/pods/95860e29-9750-42bf-b996-2e7513a86e05/volumes" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.136145 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-68b94"] Sep 30 17:20:02 crc kubenswrapper[4818]: E0930 17:20:02.136526 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbff5ac7-125a-4a10-bf82-38b423cb6e9b" containerName="watcher-applier" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.136545 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbff5ac7-125a-4a10-bf82-38b423cb6e9b" containerName="watcher-applier" Sep 30 17:20:02 crc kubenswrapper[4818]: E0930 17:20:02.136568 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95860e29-9750-42bf-b996-2e7513a86e05" containerName="watcher-decision-engine" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.136576 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="95860e29-9750-42bf-b996-2e7513a86e05" containerName="watcher-decision-engine" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.136761 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="95860e29-9750-42bf-b996-2e7513a86e05" containerName="watcher-decision-engine" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.136793 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbff5ac7-125a-4a10-bf82-38b423cb6e9b" containerName="watcher-applier" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.137472 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-68b94" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.150321 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-68b94"] Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.182650 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7scg\" (UniqueName: \"kubernetes.io/projected/ff89dd96-80d3-4dd9-b47d-ea3aac81479d-kube-api-access-t7scg\") pod \"watcher-db-create-68b94\" (UID: \"ff89dd96-80d3-4dd9-b47d-ea3aac81479d\") " pod="watcher-kuttl-default/watcher-db-create-68b94" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.283525 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7scg\" (UniqueName: \"kubernetes.io/projected/ff89dd96-80d3-4dd9-b47d-ea3aac81479d-kube-api-access-t7scg\") pod \"watcher-db-create-68b94\" (UID: \"ff89dd96-80d3-4dd9-b47d-ea3aac81479d\") " pod="watcher-kuttl-default/watcher-db-create-68b94" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.302405 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7scg\" (UniqueName: \"kubernetes.io/projected/ff89dd96-80d3-4dd9-b47d-ea3aac81479d-kube-api-access-t7scg\") pod \"watcher-db-create-68b94\" (UID: \"ff89dd96-80d3-4dd9-b47d-ea3aac81479d\") " pod="watcher-kuttl-default/watcher-db-create-68b94" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.484382 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-68b94" Sep 30 17:20:02 crc kubenswrapper[4818]: I0930 17:20:02.992863 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-68b94"] Sep 30 17:20:03 crc kubenswrapper[4818]: I0930 17:20:03.968137 4818 generic.go:334] "Generic (PLEG): container finished" podID="ff89dd96-80d3-4dd9-b47d-ea3aac81479d" containerID="1b9863a9bbfd7aa35e4971c02ce5bc6dca01a8d6f74cfed8d6236a8b2c12ac1f" exitCode=0 Sep 30 17:20:03 crc kubenswrapper[4818]: I0930 17:20:03.968515 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-68b94" event={"ID":"ff89dd96-80d3-4dd9-b47d-ea3aac81479d","Type":"ContainerDied","Data":"1b9863a9bbfd7aa35e4971c02ce5bc6dca01a8d6f74cfed8d6236a8b2c12ac1f"} Sep 30 17:20:03 crc kubenswrapper[4818]: I0930 17:20:03.968561 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-68b94" event={"ID":"ff89dd96-80d3-4dd9-b47d-ea3aac81479d","Type":"ContainerStarted","Data":"2674b0eecb1eb7428a1eeeb43a3269c33d807ef5aed728486cf461685c80c15e"} Sep 30 17:20:05 crc kubenswrapper[4818]: I0930 17:20:05.403906 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-68b94" Sep 30 17:20:05 crc kubenswrapper[4818]: I0930 17:20:05.579316 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7scg\" (UniqueName: \"kubernetes.io/projected/ff89dd96-80d3-4dd9-b47d-ea3aac81479d-kube-api-access-t7scg\") pod \"ff89dd96-80d3-4dd9-b47d-ea3aac81479d\" (UID: \"ff89dd96-80d3-4dd9-b47d-ea3aac81479d\") " Sep 30 17:20:05 crc kubenswrapper[4818]: I0930 17:20:05.584514 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff89dd96-80d3-4dd9-b47d-ea3aac81479d-kube-api-access-t7scg" (OuterVolumeSpecName: "kube-api-access-t7scg") pod "ff89dd96-80d3-4dd9-b47d-ea3aac81479d" (UID: "ff89dd96-80d3-4dd9-b47d-ea3aac81479d"). InnerVolumeSpecName "kube-api-access-t7scg". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:05 crc kubenswrapper[4818]: I0930 17:20:05.680740 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7scg\" (UniqueName: \"kubernetes.io/projected/ff89dd96-80d3-4dd9-b47d-ea3aac81479d-kube-api-access-t7scg\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:05 crc kubenswrapper[4818]: I0930 17:20:05.990946 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-68b94" event={"ID":"ff89dd96-80d3-4dd9-b47d-ea3aac81479d","Type":"ContainerDied","Data":"2674b0eecb1eb7428a1eeeb43a3269c33d807ef5aed728486cf461685c80c15e"} Sep 30 17:20:05 crc kubenswrapper[4818]: I0930 17:20:05.990997 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2674b0eecb1eb7428a1eeeb43a3269c33d807ef5aed728486cf461685c80c15e" Sep 30 17:20:05 crc kubenswrapper[4818]: I0930 17:20:05.991068 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-68b94" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.133797 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-ab9d-account-create-g26cq"] Sep 30 17:20:12 crc kubenswrapper[4818]: E0930 17:20:12.134620 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff89dd96-80d3-4dd9-b47d-ea3aac81479d" containerName="mariadb-database-create" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.134636 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff89dd96-80d3-4dd9-b47d-ea3aac81479d" containerName="mariadb-database-create" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.134798 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff89dd96-80d3-4dd9-b47d-ea3aac81479d" containerName="mariadb-database-create" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.135320 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.137964 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.148799 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-ab9d-account-create-g26cq"] Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.285205 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqff5\" (UniqueName: \"kubernetes.io/projected/3e9a8a99-63a3-4298-84a9-5a5717bf18b7-kube-api-access-rqff5\") pod \"watcher-ab9d-account-create-g26cq\" (UID: \"3e9a8a99-63a3-4298-84a9-5a5717bf18b7\") " pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.387240 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqff5\" (UniqueName: \"kubernetes.io/projected/3e9a8a99-63a3-4298-84a9-5a5717bf18b7-kube-api-access-rqff5\") pod \"watcher-ab9d-account-create-g26cq\" (UID: \"3e9a8a99-63a3-4298-84a9-5a5717bf18b7\") " pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.419865 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqff5\" (UniqueName: \"kubernetes.io/projected/3e9a8a99-63a3-4298-84a9-5a5717bf18b7-kube-api-access-rqff5\") pod \"watcher-ab9d-account-create-g26cq\" (UID: \"3e9a8a99-63a3-4298-84a9-5a5717bf18b7\") " pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.449915 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" Sep 30 17:20:12 crc kubenswrapper[4818]: W0930 17:20:12.983714 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e9a8a99_63a3_4298_84a9_5a5717bf18b7.slice/crio-8417225070ac5b4b26ed825e9ca6f39bcecf83b7042712d3882466cd756c4101 WatchSource:0}: Error finding container 8417225070ac5b4b26ed825e9ca6f39bcecf83b7042712d3882466cd756c4101: Status 404 returned error can't find the container with id 8417225070ac5b4b26ed825e9ca6f39bcecf83b7042712d3882466cd756c4101 Sep 30 17:20:12 crc kubenswrapper[4818]: I0930 17:20:12.988387 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-ab9d-account-create-g26cq"] Sep 30 17:20:13 crc kubenswrapper[4818]: I0930 17:20:13.051763 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" event={"ID":"3e9a8a99-63a3-4298-84a9-5a5717bf18b7","Type":"ContainerStarted","Data":"8417225070ac5b4b26ed825e9ca6f39bcecf83b7042712d3882466cd756c4101"} Sep 30 17:20:14 crc kubenswrapper[4818]: I0930 17:20:14.060775 4818 generic.go:334] "Generic (PLEG): container finished" podID="3e9a8a99-63a3-4298-84a9-5a5717bf18b7" containerID="0eaf6396e5a1e35819db86e64eb39716096c96070dece87bf3ee4f69103ab646" exitCode=0 Sep 30 17:20:14 crc kubenswrapper[4818]: I0930 17:20:14.060897 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" event={"ID":"3e9a8a99-63a3-4298-84a9-5a5717bf18b7","Type":"ContainerDied","Data":"0eaf6396e5a1e35819db86e64eb39716096c96070dece87bf3ee4f69103ab646"} Sep 30 17:20:15 crc kubenswrapper[4818]: I0930 17:20:15.355451 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" Sep 30 17:20:15 crc kubenswrapper[4818]: I0930 17:20:15.541617 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqff5\" (UniqueName: \"kubernetes.io/projected/3e9a8a99-63a3-4298-84a9-5a5717bf18b7-kube-api-access-rqff5\") pod \"3e9a8a99-63a3-4298-84a9-5a5717bf18b7\" (UID: \"3e9a8a99-63a3-4298-84a9-5a5717bf18b7\") " Sep 30 17:20:15 crc kubenswrapper[4818]: I0930 17:20:15.553103 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e9a8a99-63a3-4298-84a9-5a5717bf18b7-kube-api-access-rqff5" (OuterVolumeSpecName: "kube-api-access-rqff5") pod "3e9a8a99-63a3-4298-84a9-5a5717bf18b7" (UID: "3e9a8a99-63a3-4298-84a9-5a5717bf18b7"). InnerVolumeSpecName "kube-api-access-rqff5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:15 crc kubenswrapper[4818]: I0930 17:20:15.643667 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqff5\" (UniqueName: \"kubernetes.io/projected/3e9a8a99-63a3-4298-84a9-5a5717bf18b7-kube-api-access-rqff5\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:16 crc kubenswrapper[4818]: I0930 17:20:16.089461 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" event={"ID":"3e9a8a99-63a3-4298-84a9-5a5717bf18b7","Type":"ContainerDied","Data":"8417225070ac5b4b26ed825e9ca6f39bcecf83b7042712d3882466cd756c4101"} Sep 30 17:20:16 crc kubenswrapper[4818]: I0930 17:20:16.089916 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8417225070ac5b4b26ed825e9ca6f39bcecf83b7042712d3882466cd756c4101" Sep 30 17:20:16 crc kubenswrapper[4818]: I0930 17:20:16.089512 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-ab9d-account-create-g26cq" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.318843 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-s746z"] Sep 30 17:20:17 crc kubenswrapper[4818]: E0930 17:20:17.320056 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e9a8a99-63a3-4298-84a9-5a5717bf18b7" containerName="mariadb-account-create" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.320155 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e9a8a99-63a3-4298-84a9-5a5717bf18b7" containerName="mariadb-account-create" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.320512 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e9a8a99-63a3-4298-84a9-5a5717bf18b7" containerName="mariadb-account-create" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.321334 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-s746z"] Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.321515 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.354743 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-crwgj" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.355477 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.372434 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-db-sync-config-data\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.372520 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-config-data\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.372560 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkpvx\" (UniqueName: \"kubernetes.io/projected/1809dbc0-d11c-4fb4-9147-20b55e385130-kube-api-access-lkpvx\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.372645 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.474196 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkpvx\" (UniqueName: \"kubernetes.io/projected/1809dbc0-d11c-4fb4-9147-20b55e385130-kube-api-access-lkpvx\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.474241 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.474313 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-db-sync-config-data\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.474370 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-config-data\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.481115 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.481436 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-config-data\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.482303 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-db-sync-config-data\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.497750 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkpvx\" (UniqueName: \"kubernetes.io/projected/1809dbc0-d11c-4fb4-9147-20b55e385130-kube-api-access-lkpvx\") pod \"watcher-kuttl-db-sync-s746z\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:17 crc kubenswrapper[4818]: I0930 17:20:17.676247 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:18 crc kubenswrapper[4818]: I0930 17:20:18.205711 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-s746z"] Sep 30 17:20:19 crc kubenswrapper[4818]: I0930 17:20:19.114185 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" event={"ID":"1809dbc0-d11c-4fb4-9147-20b55e385130","Type":"ContainerStarted","Data":"82def7b5c6178da1bab4750adb32623e59ef5a6fe90efbb416a24abed939eaea"} Sep 30 17:20:19 crc kubenswrapper[4818]: I0930 17:20:19.114520 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" event={"ID":"1809dbc0-d11c-4fb4-9147-20b55e385130","Type":"ContainerStarted","Data":"31eda33853517eea6e26f823513eb92a5afc4b4da84908228d0724d4f677a2ae"} Sep 30 17:20:19 crc kubenswrapper[4818]: I0930 17:20:19.130606 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" podStartSLOduration=2.130585269 podStartE2EDuration="2.130585269s" podCreationTimestamp="2025-09-30 17:20:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:20:19.127553238 +0000 UTC m=+1265.881825064" watchObservedRunningTime="2025-09-30 17:20:19.130585269 +0000 UTC m=+1265.884857085" Sep 30 17:20:21 crc kubenswrapper[4818]: I0930 17:20:21.129709 4818 generic.go:334] "Generic (PLEG): container finished" podID="1809dbc0-d11c-4fb4-9147-20b55e385130" containerID="82def7b5c6178da1bab4750adb32623e59ef5a6fe90efbb416a24abed939eaea" exitCode=0 Sep 30 17:20:21 crc kubenswrapper[4818]: I0930 17:20:21.129812 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" event={"ID":"1809dbc0-d11c-4fb4-9147-20b55e385130","Type":"ContainerDied","Data":"82def7b5c6178da1bab4750adb32623e59ef5a6fe90efbb416a24abed939eaea"} Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.506321 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.596196 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.596250 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.648728 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-combined-ca-bundle\") pod \"1809dbc0-d11c-4fb4-9147-20b55e385130\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.648804 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-db-sync-config-data\") pod \"1809dbc0-d11c-4fb4-9147-20b55e385130\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.648871 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-config-data\") pod \"1809dbc0-d11c-4fb4-9147-20b55e385130\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.649212 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkpvx\" (UniqueName: \"kubernetes.io/projected/1809dbc0-d11c-4fb4-9147-20b55e385130-kube-api-access-lkpvx\") pod \"1809dbc0-d11c-4fb4-9147-20b55e385130\" (UID: \"1809dbc0-d11c-4fb4-9147-20b55e385130\") " Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.653682 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1809dbc0-d11c-4fb4-9147-20b55e385130" (UID: "1809dbc0-d11c-4fb4-9147-20b55e385130"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.653943 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1809dbc0-d11c-4fb4-9147-20b55e385130-kube-api-access-lkpvx" (OuterVolumeSpecName: "kube-api-access-lkpvx") pod "1809dbc0-d11c-4fb4-9147-20b55e385130" (UID: "1809dbc0-d11c-4fb4-9147-20b55e385130"). InnerVolumeSpecName "kube-api-access-lkpvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.690802 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1809dbc0-d11c-4fb4-9147-20b55e385130" (UID: "1809dbc0-d11c-4fb4-9147-20b55e385130"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.726240 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-config-data" (OuterVolumeSpecName: "config-data") pod "1809dbc0-d11c-4fb4-9147-20b55e385130" (UID: "1809dbc0-d11c-4fb4-9147-20b55e385130"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.750869 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.750914 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.750948 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1809dbc0-d11c-4fb4-9147-20b55e385130-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:22 crc kubenswrapper[4818]: I0930 17:20:22.750961 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkpvx\" (UniqueName: \"kubernetes.io/projected/1809dbc0-d11c-4fb4-9147-20b55e385130-kube-api-access-lkpvx\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.150841 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" event={"ID":"1809dbc0-d11c-4fb4-9147-20b55e385130","Type":"ContainerDied","Data":"31eda33853517eea6e26f823513eb92a5afc4b4da84908228d0724d4f677a2ae"} Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.150890 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31eda33853517eea6e26f823513eb92a5afc4b4da84908228d0724d4f677a2ae" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.150947 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-s746z" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.387971 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:23 crc kubenswrapper[4818]: E0930 17:20:23.388329 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1809dbc0-d11c-4fb4-9147-20b55e385130" containerName="watcher-kuttl-db-sync" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.388349 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1809dbc0-d11c-4fb4-9147-20b55e385130" containerName="watcher-kuttl-db-sync" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.388528 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1809dbc0-d11c-4fb4-9147-20b55e385130" containerName="watcher-kuttl-db-sync" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.389562 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.394649 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.394878 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-crwgj" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.395089 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.396510 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.409200 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.415342 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.416461 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.417964 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.430479 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.479717 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.480836 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.484998 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.499296 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.564717 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbm96\" (UniqueName: \"kubernetes.io/projected/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-kube-api-access-xbm96\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.564770 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a48780-51ab-4568-9e7a-60a5fbb70dfa-logs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.564844 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.564879 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.564945 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.564968 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.564996 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hfkf\" (UniqueName: \"kubernetes.io/projected/19a48780-51ab-4568-9e7a-60a5fbb70dfa-kube-api-access-8hfkf\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.565029 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.565095 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.565153 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.565176 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.666313 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a48780-51ab-4568-9e7a-60a5fbb70dfa-logs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.666626 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.666660 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.666689 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667281 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667336 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667361 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hfkf\" (UniqueName: \"kubernetes.io/projected/19a48780-51ab-4568-9e7a-60a5fbb70dfa-kube-api-access-8hfkf\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667276 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a48780-51ab-4568-9e7a-60a5fbb70dfa-logs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667453 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667755 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667799 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667855 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667906 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6tmx\" (UniqueName: \"kubernetes.io/projected/ba9805f8-74cf-4618-b75b-1554d5b3670e-kube-api-access-g6tmx\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.667970 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba9805f8-74cf-4618-b75b-1554d5b3670e-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.668044 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.668068 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.668393 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.672447 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbm96\" (UniqueName: \"kubernetes.io/projected/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-kube-api-access-xbm96\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.675778 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.676010 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.695681 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.696151 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.699558 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.700734 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hfkf\" (UniqueName: \"kubernetes.io/projected/19a48780-51ab-4568-9e7a-60a5fbb70dfa-kube-api-access-8hfkf\") pod 
\"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.702263 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.702381 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbm96\" (UniqueName: \"kubernetes.io/projected/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-kube-api-access-xbm96\") pod \"watcher-kuttl-applier-0\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.703279 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.704396 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.766385 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.788765 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.788843 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.788870 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.788911 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6tmx\" (UniqueName: \"kubernetes.io/projected/ba9805f8-74cf-4618-b75b-1554d5b3670e-kube-api-access-g6tmx\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.788953 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba9805f8-74cf-4618-b75b-1554d5b3670e-logs\") pod 
\"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.789351 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba9805f8-74cf-4618-b75b-1554d5b3670e-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.795525 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.798540 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.802746 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:23 crc kubenswrapper[4818]: I0930 17:20:23.814387 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6tmx\" (UniqueName: \"kubernetes.io/projected/ba9805f8-74cf-4618-b75b-1554d5b3670e-kube-api-access-g6tmx\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:24 crc kubenswrapper[4818]: I0930 17:20:24.100234 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:24 crc kubenswrapper[4818]: I0930 17:20:24.293533 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:24 crc kubenswrapper[4818]: I0930 17:20:24.350563 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:20:24 crc kubenswrapper[4818]: I0930 17:20:24.568826 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:20:24 crc kubenswrapper[4818]: W0930 17:20:24.575076 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba9805f8_74cf_4618_b75b_1554d5b3670e.slice/crio-eb9a41054ef6c2200358517e8f15434ff6549d5bcc4ba44813e56a63bc7631de WatchSource:0}: Error finding container eb9a41054ef6c2200358517e8f15434ff6549d5bcc4ba44813e56a63bc7631de: Status 404 returned error can't find the container with id eb9a41054ef6c2200358517e8f15434ff6549d5bcc4ba44813e56a63bc7631de Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.188034 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ba9805f8-74cf-4618-b75b-1554d5b3670e","Type":"ContainerStarted","Data":"3850059c5707f06a13c1e47d4945037b1f534d86cb566de29b9b60ffc72a7605"} Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.188365 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ba9805f8-74cf-4618-b75b-1554d5b3670e","Type":"ContainerStarted","Data":"eb9a41054ef6c2200358517e8f15434ff6549d5bcc4ba44813e56a63bc7631de"} Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.191696 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"19a48780-51ab-4568-9e7a-60a5fbb70dfa","Type":"ContainerStarted","Data":"1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48"} Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.191746 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"19a48780-51ab-4568-9e7a-60a5fbb70dfa","Type":"ContainerStarted","Data":"ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a"} Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.191759 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"19a48780-51ab-4568-9e7a-60a5fbb70dfa","Type":"ContainerStarted","Data":"c4385d36245257d98c1e087f2d7637cc65b21789e04a2d7bc80c781f5411546b"} Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.192198 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.197250 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f","Type":"ContainerStarted","Data":"4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab"} Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.197319 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" 
event={"ID":"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f","Type":"ContainerStarted","Data":"d19cb3390ecb0fee1a96de926b5c8d2a26f9aba66227e4b3f8f82cfd780488ff"} Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.211891 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.21187523 podStartE2EDuration="2.21187523s" podCreationTimestamp="2025-09-30 17:20:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:20:25.206379291 +0000 UTC m=+1271.960651097" watchObservedRunningTime="2025-09-30 17:20:25.21187523 +0000 UTC m=+1271.966147046" Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.221383 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.221362186 podStartE2EDuration="2.221362186s" podCreationTimestamp="2025-09-30 17:20:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:20:25.219043113 +0000 UTC m=+1271.973314939" watchObservedRunningTime="2025-09-30 17:20:25.221362186 +0000 UTC m=+1271.975634002" Sep 30 17:20:25 crc kubenswrapper[4818]: I0930 17:20:25.242665 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.242647061 podStartE2EDuration="2.242647061s" podCreationTimestamp="2025-09-30 17:20:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:20:25.237238245 +0000 UTC m=+1271.991510081" watchObservedRunningTime="2025-09-30 17:20:25.242647061 +0000 UTC m=+1271.996918888" Sep 30 17:20:26 crc kubenswrapper[4818]: I0930 17:20:26.559721 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:27 crc kubenswrapper[4818]: I0930 17:20:27.376571 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:28 crc kubenswrapper[4818]: I0930 17:20:28.705841 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:28 crc kubenswrapper[4818]: I0930 17:20:28.767041 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:33 crc kubenswrapper[4818]: I0930 17:20:33.705321 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:33 crc kubenswrapper[4818]: I0930 17:20:33.720726 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:33 crc kubenswrapper[4818]: I0930 17:20:33.767716 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:33 crc kubenswrapper[4818]: I0930 17:20:33.797969 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:34 crc kubenswrapper[4818]: I0930 17:20:34.101097 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:34 crc kubenswrapper[4818]: I0930 17:20:34.139948 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:34 crc kubenswrapper[4818]: I0930 17:20:34.283478 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:34 crc kubenswrapper[4818]: I0930 17:20:34.293411 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:34 crc kubenswrapper[4818]: I0930 17:20:34.315704 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:34 crc kubenswrapper[4818]: I0930 17:20:34.318691 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:36 crc kubenswrapper[4818]: I0930 17:20:36.515813 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:36 crc kubenswrapper[4818]: I0930 17:20:36.516542 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-central-agent" containerID="cri-o://73d7561c0e62697f0ebfd4b2638443d8e7dfa3385b5d5a188f830b1880e7d6f7" gracePeriod=30 Sep 30 17:20:36 crc kubenswrapper[4818]: I0930 17:20:36.516648 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-notification-agent" containerID="cri-o://a00f7eea40e81f5402882576871a0e6df8d636b0158eacc93f55cc8bab82fefa" gracePeriod=30 Sep 30 17:20:36 crc kubenswrapper[4818]: I0930 17:20:36.516661 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="proxy-httpd" containerID="cri-o://54c063863ba6f3ba7892ac6f17e14f5e56e0254f618fececdb087b274c254251" gracePeriod=30 Sep 30 17:20:36 crc kubenswrapper[4818]: I0930 17:20:36.516659 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="sg-core" containerID="cri-o://2a44ba961fa006fdd17ee3b0a8afe170045f3d05fe030ea957848f0979747f7d" gracePeriod=30 Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.311217 4818 generic.go:334] "Generic (PLEG): container finished" podID="7ebe0898-feb2-4e12-b4be-efad66862264" containerID="54c063863ba6f3ba7892ac6f17e14f5e56e0254f618fececdb087b274c254251" exitCode=0 Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.311527 4818 generic.go:334] "Generic (PLEG): container finished" podID="7ebe0898-feb2-4e12-b4be-efad66862264" containerID="2a44ba961fa006fdd17ee3b0a8afe170045f3d05fe030ea957848f0979747f7d" exitCode=2 Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.311540 4818 generic.go:334] "Generic (PLEG): container finished" podID="7ebe0898-feb2-4e12-b4be-efad66862264" containerID="73d7561c0e62697f0ebfd4b2638443d8e7dfa3385b5d5a188f830b1880e7d6f7" exitCode=0 Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.311383 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" 
event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerDied","Data":"54c063863ba6f3ba7892ac6f17e14f5e56e0254f618fececdb087b274c254251"} Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.311576 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerDied","Data":"2a44ba961fa006fdd17ee3b0a8afe170045f3d05fe030ea957848f0979747f7d"} Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.311590 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerDied","Data":"73d7561c0e62697f0ebfd4b2638443d8e7dfa3385b5d5a188f830b1880e7d6f7"} Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.868869 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.869584 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-kuttl-api-log" containerID="cri-o://ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a" gracePeriod=30 Sep 30 17:20:37 crc kubenswrapper[4818]: I0930 17:20:37.869672 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-api" containerID="cri-o://1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48" gracePeriod=30 Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.323164 4818 generic.go:334] "Generic (PLEG): container finished" podID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerID="ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a" exitCode=143 Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.323310 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"19a48780-51ab-4568-9e7a-60a5fbb70dfa","Type":"ContainerDied","Data":"ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a"} Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.812839 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.973744 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-internal-tls-certs\") pod \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.973820 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-config-data\") pod \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.973859 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hfkf\" (UniqueName: \"kubernetes.io/projected/19a48780-51ab-4568-9e7a-60a5fbb70dfa-kube-api-access-8hfkf\") pod \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.974485 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-custom-prometheus-ca\") pod \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.974533 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a48780-51ab-4568-9e7a-60a5fbb70dfa-logs\") pod \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.974576 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-public-tls-certs\") pod \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.974612 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-combined-ca-bundle\") pod \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\" (UID: \"19a48780-51ab-4568-9e7a-60a5fbb70dfa\") " Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.975124 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19a48780-51ab-4568-9e7a-60a5fbb70dfa-logs" (OuterVolumeSpecName: "logs") pod "19a48780-51ab-4568-9e7a-60a5fbb70dfa" (UID: "19a48780-51ab-4568-9e7a-60a5fbb70dfa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.979395 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19a48780-51ab-4568-9e7a-60a5fbb70dfa-kube-api-access-8hfkf" (OuterVolumeSpecName: "kube-api-access-8hfkf") pod "19a48780-51ab-4568-9e7a-60a5fbb70dfa" (UID: "19a48780-51ab-4568-9e7a-60a5fbb70dfa"). InnerVolumeSpecName "kube-api-access-8hfkf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:38 crc kubenswrapper[4818]: I0930 17:20:38.996794 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "19a48780-51ab-4568-9e7a-60a5fbb70dfa" (UID: "19a48780-51ab-4568-9e7a-60a5fbb70dfa"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.014435 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19a48780-51ab-4568-9e7a-60a5fbb70dfa" (UID: "19a48780-51ab-4568-9e7a-60a5fbb70dfa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.022704 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-config-data" (OuterVolumeSpecName: "config-data") pod "19a48780-51ab-4568-9e7a-60a5fbb70dfa" (UID: "19a48780-51ab-4568-9e7a-60a5fbb70dfa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.045109 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "19a48780-51ab-4568-9e7a-60a5fbb70dfa" (UID: "19a48780-51ab-4568-9e7a-60a5fbb70dfa"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.066604 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "19a48780-51ab-4568-9e7a-60a5fbb70dfa" (UID: "19a48780-51ab-4568-9e7a-60a5fbb70dfa"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.076851 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.076880 4818 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.076889 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.076898 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hfkf\" (UniqueName: \"kubernetes.io/projected/19a48780-51ab-4568-9e7a-60a5fbb70dfa-kube-api-access-8hfkf\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.076910 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.076918 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a48780-51ab-4568-9e7a-60a5fbb70dfa-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.076988 4818 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a48780-51ab-4568-9e7a-60a5fbb70dfa-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.332847 4818 generic.go:334] "Generic (PLEG): container finished" podID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerID="1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48" exitCode=0 Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.332896 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"19a48780-51ab-4568-9e7a-60a5fbb70dfa","Type":"ContainerDied","Data":"1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48"} Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.332938 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"19a48780-51ab-4568-9e7a-60a5fbb70dfa","Type":"ContainerDied","Data":"c4385d36245257d98c1e087f2d7637cc65b21789e04a2d7bc80c781f5411546b"} Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.332955 4818 scope.go:117] "RemoveContainer" containerID="1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.333067 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.369026 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.372840 4818 scope.go:117] "RemoveContainer" containerID="ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.374381 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.403014 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:39 crc kubenswrapper[4818]: E0930 17:20:39.403443 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-api" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.403467 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-api" Sep 30 17:20:39 crc kubenswrapper[4818]: E0930 17:20:39.403481 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-kuttl-api-log" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.403491 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-kuttl-api-log" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.403708 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-kuttl-api-log" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.403742 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-api" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.405325 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.406388 4818 scope.go:117] "RemoveContainer" containerID="1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48" Sep 30 17:20:39 crc kubenswrapper[4818]: E0930 17:20:39.407899 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48\": container with ID starting with 1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48 not found: ID does not exist" containerID="1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.407942 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48"} err="failed to get container status \"1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48\": rpc error: code = NotFound desc = could not find container \"1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48\": container with ID starting with 1ba2df1db6f920fe76243f074072ef4a7b602f1f9712234d0b4aa310b9df9a48 not found: ID does not exist" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.407964 4818 scope.go:117] "RemoveContainer" containerID="ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.408059 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.408211 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.408220 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Sep 30 17:20:39 crc kubenswrapper[4818]: E0930 17:20:39.414163 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a\": container with ID starting with ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a not found: ID does not exist" containerID="ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.414201 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a"} err="failed to get container status \"ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a\": rpc error: code = NotFound desc = could not find container \"ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a\": container with ID starting with ee1b12f1c3256665291dc3901c1424e480d2778473750514ce96bf8559f48e1a not found: ID does not exist" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.418204 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.585093 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-custom-prometheus-ca\") pod 
\"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.585164 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.585238 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99c9bd23-bee1-43b2-9989-954f12848017-logs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.585266 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.585311 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.585332 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj5pf\" (UniqueName: \"kubernetes.io/projected/99c9bd23-bee1-43b2-9989-954f12848017-kube-api-access-tj5pf\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.585353 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.687119 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.687177 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj5pf\" (UniqueName: \"kubernetes.io/projected/99c9bd23-bee1-43b2-9989-954f12848017-kube-api-access-tj5pf\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.687206 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.687263 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.687312 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.687378 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99c9bd23-bee1-43b2-9989-954f12848017-logs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.687406 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.689289 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99c9bd23-bee1-43b2-9989-954f12848017-logs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.691447 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.692228 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.693761 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.694264 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.696413 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.706110 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj5pf\" (UniqueName: \"kubernetes.io/projected/99c9bd23-bee1-43b2-9989-954f12848017-kube-api-access-tj5pf\") pod \"watcher-kuttl-api-0\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:39 crc kubenswrapper[4818]: I0930 17:20:39.727132 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:40 crc kubenswrapper[4818]: I0930 17:20:40.035080 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" path="/var/lib/kubelet/pods/19a48780-51ab-4568-9e7a-60a5fbb70dfa/volumes" Sep 30 17:20:40 crc kubenswrapper[4818]: I0930 17:20:40.173033 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:40 crc kubenswrapper[4818]: I0930 17:20:40.355265 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"99c9bd23-bee1-43b2-9989-954f12848017","Type":"ContainerStarted","Data":"5220479cca9634f18dfc73eaae800d4be1434bd330cf24cb74b7d1e91e35b7b8"} Sep 30 17:20:40 crc kubenswrapper[4818]: I0930 17:20:40.355574 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"99c9bd23-bee1-43b2-9989-954f12848017","Type":"ContainerStarted","Data":"0dbe1f6e6fa8c9cc1b39eac9ec9df5396e0a90402dd3b884ce3f22bc802607dd"} Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.319508 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.439787 4818 generic.go:334] "Generic (PLEG): container finished" podID="7ebe0898-feb2-4e12-b4be-efad66862264" containerID="a00f7eea40e81f5402882576871a0e6df8d636b0158eacc93f55cc8bab82fefa" exitCode=0 Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.439859 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerDied","Data":"a00f7eea40e81f5402882576871a0e6df8d636b0158eacc93f55cc8bab82fefa"} Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.443714 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"99c9bd23-bee1-43b2-9989-954f12848017","Type":"ContainerStarted","Data":"903e29fc63689a5cc07df94d2b565246f2db926c0d41f79813bd93ca68d3c6f3"} Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.443786 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.474229 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.474188095 podStartE2EDuration="2.474188095s" 
podCreationTimestamp="2025-09-30 17:20:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:20:41.466255541 +0000 UTC m=+1288.220527367" watchObservedRunningTime="2025-09-30 17:20:41.474188095 +0000 UTC m=+1288.228459911" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.571654 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715158 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-ceilometer-tls-certs\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715259 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-combined-ca-bundle\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715300 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lr5jh\" (UniqueName: \"kubernetes.io/projected/7ebe0898-feb2-4e12-b4be-efad66862264-kube-api-access-lr5jh\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715353 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-scripts\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715425 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-sg-core-conf-yaml\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715473 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-log-httpd\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715517 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-run-httpd\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715544 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-config-data\") pod \"7ebe0898-feb2-4e12-b4be-efad66862264\" (UID: \"7ebe0898-feb2-4e12-b4be-efad66862264\") " Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.715912 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-run-httpd" 
(OuterVolumeSpecName: "run-httpd") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.716048 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.722083 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ebe0898-feb2-4e12-b4be-efad66862264-kube-api-access-lr5jh" (OuterVolumeSpecName: "kube-api-access-lr5jh") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "kube-api-access-lr5jh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.751036 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.751245 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-scripts" (OuterVolumeSpecName: "scripts") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.771022 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.783227 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.807427 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-config-data" (OuterVolumeSpecName: "config-data") pod "7ebe0898-feb2-4e12-b4be-efad66862264" (UID: "7ebe0898-feb2-4e12-b4be-efad66862264"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817007 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817234 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817347 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lr5jh\" (UniqueName: \"kubernetes.io/projected/7ebe0898-feb2-4e12-b4be-efad66862264-kube-api-access-lr5jh\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817453 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817535 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817645 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817759 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ebe0898-feb2-4e12-b4be-efad66862264-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:41 crc kubenswrapper[4818]: I0930 17:20:41.817834 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ebe0898-feb2-4e12-b4be-efad66862264-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.454380 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7ebe0898-feb2-4e12-b4be-efad66862264","Type":"ContainerDied","Data":"e59f413a682e1b65c935fa30bbe4dd32113869a69a79c87ba1ee5c47589a37b0"} Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.454445 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.454520 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-kuttl-api-log" containerID="cri-o://5220479cca9634f18dfc73eaae800d4be1434bd330cf24cb74b7d1e91e35b7b8" gracePeriod=30 Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.454868 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-api" containerID="cri-o://903e29fc63689a5cc07df94d2b565246f2db926c0d41f79813bd93ca68d3c6f3" gracePeriod=30 Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.454450 4818 scope.go:117] "RemoveContainer" containerID="54c063863ba6f3ba7892ac6f17e14f5e56e0254f618fececdb087b274c254251" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.459986 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.145:9322/\": EOF" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.461882 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.145:9322/\": read tcp 10.217.0.2:47826->10.217.0.145:9322: read: connection reset by peer" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.480059 4818 scope.go:117] "RemoveContainer" containerID="2a44ba961fa006fdd17ee3b0a8afe170045f3d05fe030ea957848f0979747f7d" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.491503 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.500219 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.507723 4818 scope.go:117] "RemoveContainer" containerID="a00f7eea40e81f5402882576871a0e6df8d636b0158eacc93f55cc8bab82fefa" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526212 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:42 crc kubenswrapper[4818]: E0930 17:20:42.526619 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-notification-agent" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526639 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-notification-agent" Sep 30 17:20:42 crc kubenswrapper[4818]: E0930 17:20:42.526652 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="proxy-httpd" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526662 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="proxy-httpd" Sep 30 17:20:42 crc kubenswrapper[4818]: E0930 17:20:42.526683 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-central-agent" Sep 30 
17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526691 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-central-agent" Sep 30 17:20:42 crc kubenswrapper[4818]: E0930 17:20:42.526718 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="sg-core" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526726 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="sg-core" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526943 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="sg-core" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526963 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="proxy-httpd" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.526985 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-central-agent" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.527000 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" containerName="ceilometer-notification-agent" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.528804 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.531212 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.531455 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.531656 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.549153 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.550329 4818 scope.go:117] "RemoveContainer" containerID="73d7561c0e62697f0ebfd4b2638443d8e7dfa3385b5d5a188f830b1880e7d6f7" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.631873 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj9xl\" (UniqueName: \"kubernetes.io/projected/38a58cc6-4709-446f-940d-77fcba122de6-kube-api-access-tj9xl\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.631960 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-log-httpd\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.632017 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: 
\"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.632072 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-scripts\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.632189 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-config-data\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.632229 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-run-httpd\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.632248 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.633024 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735015 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj9xl\" (UniqueName: \"kubernetes.io/projected/38a58cc6-4709-446f-940d-77fcba122de6-kube-api-access-tj9xl\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735071 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-log-httpd\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735125 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735175 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-scripts\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735206 4818 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-config-data\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735222 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-run-httpd\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735237 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735270 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.735898 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-run-httpd\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.736251 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-log-httpd\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.741204 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.741544 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.741637 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.743137 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-scripts\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 
17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.744808 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-config-data\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.756743 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj9xl\" (UniqueName: \"kubernetes.io/projected/38a58cc6-4709-446f-940d-77fcba122de6-kube-api-access-tj9xl\") pod \"ceilometer-0\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:42 crc kubenswrapper[4818]: I0930 17:20:42.900506 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:43 crc kubenswrapper[4818]: I0930 17:20:43.391752 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:43 crc kubenswrapper[4818]: W0930 17:20:43.401097 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38a58cc6_4709_446f_940d_77fcba122de6.slice/crio-26728eeea397bc668212de54e2585d0d187c135e22f7379fd915a88cf5b89cc9 WatchSource:0}: Error finding container 26728eeea397bc668212de54e2585d0d187c135e22f7379fd915a88cf5b89cc9: Status 404 returned error can't find the container with id 26728eeea397bc668212de54e2585d0d187c135e22f7379fd915a88cf5b89cc9 Sep 30 17:20:43 crc kubenswrapper[4818]: I0930 17:20:43.462120 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerStarted","Data":"26728eeea397bc668212de54e2585d0d187c135e22f7379fd915a88cf5b89cc9"} Sep 30 17:20:43 crc kubenswrapper[4818]: I0930 17:20:43.464881 4818 generic.go:334] "Generic (PLEG): container finished" podID="99c9bd23-bee1-43b2-9989-954f12848017" containerID="5220479cca9634f18dfc73eaae800d4be1434bd330cf24cb74b7d1e91e35b7b8" exitCode=143 Sep 30 17:20:43 crc kubenswrapper[4818]: I0930 17:20:43.464976 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"99c9bd23-bee1-43b2-9989-954f12848017","Type":"ContainerDied","Data":"5220479cca9634f18dfc73eaae800d4be1434bd330cf24cb74b7d1e91e35b7b8"} Sep 30 17:20:43 crc kubenswrapper[4818]: I0930 17:20:43.706470 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.142:9322/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Sep 30 17:20:43 crc kubenswrapper[4818]: I0930 17:20:43.706587 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="19a48780-51ab-4568-9e7a-60a5fbb70dfa" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"https://10.217.0.142:9322/\": context deadline exceeded" Sep 30 17:20:44 crc kubenswrapper[4818]: I0930 17:20:44.044333 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ebe0898-feb2-4e12-b4be-efad66862264" path="/var/lib/kubelet/pods/7ebe0898-feb2-4e12-b4be-efad66862264/volumes" Sep 30 17:20:44 crc kubenswrapper[4818]: I0930 
17:20:44.474744 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerStarted","Data":"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a"} Sep 30 17:20:44 crc kubenswrapper[4818]: I0930 17:20:44.727852 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.201684 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.145:9322/\": read tcp 10.217.0.2:38354->10.217.0.145:9322: read: connection reset by peer" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.492956 4818 generic.go:334] "Generic (PLEG): container finished" podID="99c9bd23-bee1-43b2-9989-954f12848017" containerID="903e29fc63689a5cc07df94d2b565246f2db926c0d41f79813bd93ca68d3c6f3" exitCode=0 Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.493459 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"99c9bd23-bee1-43b2-9989-954f12848017","Type":"ContainerDied","Data":"903e29fc63689a5cc07df94d2b565246f2db926c0d41f79813bd93ca68d3c6f3"} Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.497050 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerStarted","Data":"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d"} Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.497120 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerStarted","Data":"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279"} Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.612839 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.699488 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-config-data\") pod \"99c9bd23-bee1-43b2-9989-954f12848017\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.699591 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99c9bd23-bee1-43b2-9989-954f12848017-logs\") pod \"99c9bd23-bee1-43b2-9989-954f12848017\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.699687 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-custom-prometheus-ca\") pod \"99c9bd23-bee1-43b2-9989-954f12848017\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.699728 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-internal-tls-certs\") pod \"99c9bd23-bee1-43b2-9989-954f12848017\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.699749 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-combined-ca-bundle\") pod \"99c9bd23-bee1-43b2-9989-954f12848017\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.699790 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-public-tls-certs\") pod \"99c9bd23-bee1-43b2-9989-954f12848017\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.699813 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj5pf\" (UniqueName: \"kubernetes.io/projected/99c9bd23-bee1-43b2-9989-954f12848017-kube-api-access-tj5pf\") pod \"99c9bd23-bee1-43b2-9989-954f12848017\" (UID: \"99c9bd23-bee1-43b2-9989-954f12848017\") " Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.701230 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99c9bd23-bee1-43b2-9989-954f12848017-logs" (OuterVolumeSpecName: "logs") pod "99c9bd23-bee1-43b2-9989-954f12848017" (UID: "99c9bd23-bee1-43b2-9989-954f12848017"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.705688 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99c9bd23-bee1-43b2-9989-954f12848017-kube-api-access-tj5pf" (OuterVolumeSpecName: "kube-api-access-tj5pf") pod "99c9bd23-bee1-43b2-9989-954f12848017" (UID: "99c9bd23-bee1-43b2-9989-954f12848017"). InnerVolumeSpecName "kube-api-access-tj5pf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.725337 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99c9bd23-bee1-43b2-9989-954f12848017" (UID: "99c9bd23-bee1-43b2-9989-954f12848017"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.728065 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "99c9bd23-bee1-43b2-9989-954f12848017" (UID: "99c9bd23-bee1-43b2-9989-954f12848017"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.746540 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-config-data" (OuterVolumeSpecName: "config-data") pod "99c9bd23-bee1-43b2-9989-954f12848017" (UID: "99c9bd23-bee1-43b2-9989-954f12848017"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.755233 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "99c9bd23-bee1-43b2-9989-954f12848017" (UID: "99c9bd23-bee1-43b2-9989-954f12848017"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.767314 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "99c9bd23-bee1-43b2-9989-954f12848017" (UID: "99c9bd23-bee1-43b2-9989-954f12848017"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.801641 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.801674 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99c9bd23-bee1-43b2-9989-954f12848017-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.801685 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.801697 4818 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.801706 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.801714 4818 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99c9bd23-bee1-43b2-9989-954f12848017-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:45 crc kubenswrapper[4818]: I0930 17:20:45.801723 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj5pf\" (UniqueName: \"kubernetes.io/projected/99c9bd23-bee1-43b2-9989-954f12848017-kube-api-access-tj5pf\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.513295 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"99c9bd23-bee1-43b2-9989-954f12848017","Type":"ContainerDied","Data":"0dbe1f6e6fa8c9cc1b39eac9ec9df5396e0a90402dd3b884ce3f22bc802607dd"} Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.513365 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.513379 4818 scope.go:117] "RemoveContainer" containerID="903e29fc63689a5cc07df94d2b565246f2db926c0d41f79813bd93ca68d3c6f3" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.544788 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.546763 4818 scope.go:117] "RemoveContainer" containerID="5220479cca9634f18dfc73eaae800d4be1434bd330cf24cb74b7d1e91e35b7b8" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.547856 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.567142 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:46 crc kubenswrapper[4818]: E0930 17:20:46.589401 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-kuttl-api-log" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.589426 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-kuttl-api-log" Sep 30 17:20:46 crc kubenswrapper[4818]: E0930 17:20:46.589440 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-api" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.589446 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-api" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.589641 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-kuttl-api-log" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.589665 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="99c9bd23-bee1-43b2-9989-954f12848017" containerName="watcher-api" Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.590426 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.590508 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.593423 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.594132 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.594491 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.613485 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.613544 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.613579 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clgnt\" (UniqueName: \"kubernetes.io/projected/31363877-c2a7-4403-8e5e-c533af995ec8-kube-api-access-clgnt\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.613618 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.613651 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.613708 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31363877-c2a7-4403-8e5e-c533af995ec8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.613747 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.714840 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.714900 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.714977 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31363877-c2a7-4403-8e5e-c533af995ec8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.715018 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.715081 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.715111 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.715141 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clgnt\" (UniqueName: \"kubernetes.io/projected/31363877-c2a7-4403-8e5e-c533af995ec8-kube-api-access-clgnt\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.715430 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31363877-c2a7-4403-8e5e-c533af995ec8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.719827 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.720216 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.720491 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.720731 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.722249 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.735588 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clgnt\" (UniqueName: \"kubernetes.io/projected/31363877-c2a7-4403-8e5e-c533af995ec8-kube-api-access-clgnt\") pod \"watcher-kuttl-api-0\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:46 crc kubenswrapper[4818]: I0930 17:20:46.904465 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:47 crc kubenswrapper[4818]: I0930 17:20:47.463563 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:20:47 crc kubenswrapper[4818]: I0930 17:20:47.532253 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"31363877-c2a7-4403-8e5e-c533af995ec8","Type":"ContainerStarted","Data":"f42f449776ac3197ed4fadbf6da1d7b45a72bf4636fd42baa7af3a11fe75be68"}
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.033620 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99c9bd23-bee1-43b2-9989-954f12848017" path="/var/lib/kubelet/pods/99c9bd23-bee1-43b2-9989-954f12848017/volumes"
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.546564 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerStarted","Data":"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5"}
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.547918 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.549944 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"31363877-c2a7-4403-8e5e-c533af995ec8","Type":"ContainerStarted","Data":"1a46edcb8c5bb87dc4ed7e2977317ed2a2bbeb86d853ca4665bd23c0f8ec3b73"}
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.549992 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"31363877-c2a7-4403-8e5e-c533af995ec8","Type":"ContainerStarted","Data":"c205079ab1611c270eebbcbf1d91cbaaccde18460a40ec5547291e146dfff0bd"}
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.550247 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.581158 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.790411033 podStartE2EDuration="6.581136219s" podCreationTimestamp="2025-09-30 17:20:42 +0000 UTC" firstStartedPulling="2025-09-30 17:20:43.40296904 +0000 UTC m=+1290.157240866" lastFinishedPulling="2025-09-30 17:20:48.193694216 +0000 UTC m=+1294.947966052" observedRunningTime="2025-09-30 17:20:48.579609107 +0000 UTC m=+1295.333881003" watchObservedRunningTime="2025-09-30 17:20:48.581136219 +0000 UTC m=+1295.335408055"
Sep 30 17:20:48 crc kubenswrapper[4818]: I0930 17:20:48.616626 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.616608617 podStartE2EDuration="2.616608617s" podCreationTimestamp="2025-09-30 17:20:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:20:48.610870272 +0000 UTC m=+1295.365142088" watchObservedRunningTime="2025-09-30 17:20:48.616608617 +0000 UTC m=+1295.370880433"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.177436 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-s746z"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.186280 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-s746z"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.221263 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcherab9d-account-delete-hrvrb"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.222685 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.234374 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.234610 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="ba9805f8-74cf-4618-b75b-1554d5b3670e" containerName="watcher-decision-engine" containerID="cri-o://3850059c5707f06a13c1e47d4945037b1f534d86cb566de29b9b60ffc72a7605" gracePeriod=30
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.266464 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-68b94"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.268812 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4ssb\" (UniqueName: \"kubernetes.io/projected/0e0c0455-bde8-4dbe-8184-1052460b33ff-kube-api-access-d4ssb\") pod \"watcherab9d-account-delete-hrvrb\" (UID: \"0e0c0455-bde8-4dbe-8184-1052460b33ff\") " pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.272817 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-68b94"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.319441 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcherab9d-account-delete-hrvrb"]
Sep 30 17:20:49 crc kubenswrapper[4818]: E0930 17:20:49.320125 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-d4ssb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb" podUID="0e0c0455-bde8-4dbe-8184-1052460b33ff"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.334690 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-ab9d-account-create-g26cq"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.342997 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-ab9d-account-create-g26cq"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.362294 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.371614 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4ssb\" (UniqueName: \"kubernetes.io/projected/0e0c0455-bde8-4dbe-8184-1052460b33ff-kube-api-access-d4ssb\") pod \"watcherab9d-account-delete-hrvrb\" (UID: \"0e0c0455-bde8-4dbe-8184-1052460b33ff\") " pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.373087 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcherab9d-account-delete-hrvrb"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.392073 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.392276 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" containerName="watcher-applier" containerID="cri-o://4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab" gracePeriod=30
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.396374 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4ssb\" (UniqueName: \"kubernetes.io/projected/0e0c0455-bde8-4dbe-8184-1052460b33ff-kube-api-access-d4ssb\") pod \"watcherab9d-account-delete-hrvrb\" (UID: \"0e0c0455-bde8-4dbe-8184-1052460b33ff\") " pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.557819 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.558249 4818 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="watcher-kuttl-default/watcher-kuttl-api-0" secret="" err="secret \"watcher-watcher-kuttl-dockercfg-crwgj\" not found"
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.567883 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb"
Sep 30 17:20:49 crc kubenswrapper[4818]: E0930 17:20:49.574544 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-api-config-data: secret "watcher-kuttl-api-config-data" not found
Sep 30 17:20:49 crc kubenswrapper[4818]: E0930 17:20:49.574609 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data podName:31363877-c2a7-4403-8e5e-c533af995ec8 nodeName:}" failed. No retries permitted until 2025-09-30 17:20:50.074590852 +0000 UTC m=+1296.828862668 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data") pod "watcher-kuttl-api-0" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8") : secret "watcher-kuttl-api-config-data" not found
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.676360 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4ssb\" (UniqueName: \"kubernetes.io/projected/0e0c0455-bde8-4dbe-8184-1052460b33ff-kube-api-access-d4ssb\") pod \"0e0c0455-bde8-4dbe-8184-1052460b33ff\" (UID: \"0e0c0455-bde8-4dbe-8184-1052460b33ff\") "
Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.689086 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e0c0455-bde8-4dbe-8184-1052460b33ff-kube-api-access-d4ssb" (OuterVolumeSpecName: "kube-api-access-d4ssb") pod "0e0c0455-bde8-4dbe-8184-1052460b33ff" (UID: "0e0c0455-bde8-4dbe-8184-1052460b33ff"). InnerVolumeSpecName "kube-api-access-d4ssb". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:49 crc kubenswrapper[4818]: I0930 17:20:49.778381 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4ssb\" (UniqueName: \"kubernetes.io/projected/0e0c0455-bde8-4dbe-8184-1052460b33ff-kube-api-access-d4ssb\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.029756 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1809dbc0-d11c-4fb4-9147-20b55e385130" path="/var/lib/kubelet/pods/1809dbc0-d11c-4fb4-9147-20b55e385130/volumes" Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.030224 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e9a8a99-63a3-4298-84a9-5a5717bf18b7" path="/var/lib/kubelet/pods/3e9a8a99-63a3-4298-84a9-5a5717bf18b7/volumes" Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.030749 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff89dd96-80d3-4dd9-b47d-ea3aac81479d" path="/var/lib/kubelet/pods/ff89dd96-80d3-4dd9-b47d-ea3aac81479d/volumes" Sep 30 17:20:50 crc kubenswrapper[4818]: E0930 17:20:50.084181 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-api-config-data: secret "watcher-kuttl-api-config-data" not found Sep 30 17:20:50 crc kubenswrapper[4818]: E0930 17:20:50.084263 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data podName:31363877-c2a7-4403-8e5e-c533af995ec8 nodeName:}" failed. No retries permitted until 2025-09-30 17:20:51.084240728 +0000 UTC m=+1297.838512624 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data") pod "watcher-kuttl-api-0" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8") : secret "watcher-kuttl-api-config-data" not found Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.564111 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.564128 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcherab9d-account-delete-hrvrb" Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.564500 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-kuttl-api-log" containerID="cri-o://c205079ab1611c270eebbcbf1d91cbaaccde18460a40ec5547291e146dfff0bd" gracePeriod=30 Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.564603 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-api" containerID="cri-o://1a46edcb8c5bb87dc4ed7e2977317ed2a2bbeb86d853ca4665bd23c0f8ec3b73" gracePeriod=30 Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.570181 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.147:9322/\": EOF" Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.603262 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcherab9d-account-delete-hrvrb"] Sep 30 17:20:50 crc kubenswrapper[4818]: I0930 17:20:50.612489 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcherab9d-account-delete-hrvrb"] Sep 30 17:20:51 crc kubenswrapper[4818]: E0930 17:20:51.103726 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-api-config-data: secret "watcher-kuttl-api-config-data" not found Sep 30 17:20:51 crc kubenswrapper[4818]: E0930 17:20:51.103795 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data podName:31363877-c2a7-4403-8e5e-c533af995ec8 nodeName:}" failed. No retries permitted until 2025-09-30 17:20:53.103774506 +0000 UTC m=+1299.858046332 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data") pod "watcher-kuttl-api-0" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8") : secret "watcher-kuttl-api-config-data" not found Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.369222 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.406879 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbm96\" (UniqueName: \"kubernetes.io/projected/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-kube-api-access-xbm96\") pod \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.407116 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-config-data\") pod \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.407891 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-logs\") pod \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.407947 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-combined-ca-bundle\") pod \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\" (UID: \"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.408511 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-logs" (OuterVolumeSpecName: "logs") pod "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" (UID: "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.414911 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-kube-api-access-xbm96" (OuterVolumeSpecName: "kube-api-access-xbm96") pod "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" (UID: "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f"). InnerVolumeSpecName "kube-api-access-xbm96". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.435111 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" (UID: "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.466239 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-config-data" (OuterVolumeSpecName: "config-data") pod "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" (UID: "d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.509323 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.509365 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.509378 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbm96\" (UniqueName: \"kubernetes.io/projected/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-kube-api-access-xbm96\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.509387 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.573324 4818 generic.go:334] "Generic (PLEG): container finished" podID="ba9805f8-74cf-4618-b75b-1554d5b3670e" containerID="3850059c5707f06a13c1e47d4945037b1f534d86cb566de29b9b60ffc72a7605" exitCode=0 Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.573407 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ba9805f8-74cf-4618-b75b-1554d5b3670e","Type":"ContainerDied","Data":"3850059c5707f06a13c1e47d4945037b1f534d86cb566de29b9b60ffc72a7605"} Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.575089 4818 generic.go:334] "Generic (PLEG): container finished" podID="d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" containerID="4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab" exitCode=0 Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.575178 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f","Type":"ContainerDied","Data":"4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab"} Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.575205 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f","Type":"ContainerDied","Data":"d19cb3390ecb0fee1a96de926b5c8d2a26f9aba66227e4b3f8f82cfd780488ff"} Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.575227 4818 scope.go:117] "RemoveContainer" containerID="4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.575215 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.578133 4818 generic.go:334] "Generic (PLEG): container finished" podID="31363877-c2a7-4403-8e5e-c533af995ec8" containerID="c205079ab1611c270eebbcbf1d91cbaaccde18460a40ec5547291e146dfff0bd" exitCode=143 Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.578172 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"31363877-c2a7-4403-8e5e-c533af995ec8","Type":"ContainerDied","Data":"c205079ab1611c270eebbcbf1d91cbaaccde18460a40ec5547291e146dfff0bd"} Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.614360 4818 scope.go:117] "RemoveContainer" containerID="4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab" Sep 30 17:20:51 crc kubenswrapper[4818]: E0930 17:20:51.614805 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab\": container with ID starting with 4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab not found: ID does not exist" containerID="4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.614830 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab"} err="failed to get container status \"4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab\": rpc error: code = NotFound desc = could not find container \"4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab\": container with ID starting with 4cfb61bb28330036a5a5f611a9e48f03a22e7bfb2a274c7991ec2de038f41bab not found: ID does not exist" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.618065 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.621790 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.819565 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.870598 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.871412 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="sg-core" containerID="cri-o://7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d" gracePeriod=30 Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.871458 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="proxy-httpd" containerID="cri-o://cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5" gracePeriod=30 Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.871471 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-notification-agent" containerID="cri-o://7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279" gracePeriod=30 Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.871357 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-central-agent" containerID="cri-o://0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a" gracePeriod=30 Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.905072 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.915369 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-custom-prometheus-ca\") pod \"ba9805f8-74cf-4618-b75b-1554d5b3670e\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.915435 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-config-data\") pod \"ba9805f8-74cf-4618-b75b-1554d5b3670e\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.915477 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-combined-ca-bundle\") pod \"ba9805f8-74cf-4618-b75b-1554d5b3670e\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.915908 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6tmx\" (UniqueName: \"kubernetes.io/projected/ba9805f8-74cf-4618-b75b-1554d5b3670e-kube-api-access-g6tmx\") pod \"ba9805f8-74cf-4618-b75b-1554d5b3670e\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.916015 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba9805f8-74cf-4618-b75b-1554d5b3670e-logs\") pod 
\"ba9805f8-74cf-4618-b75b-1554d5b3670e\" (UID: \"ba9805f8-74cf-4618-b75b-1554d5b3670e\") " Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.916658 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba9805f8-74cf-4618-b75b-1554d5b3670e-logs" (OuterVolumeSpecName: "logs") pod "ba9805f8-74cf-4618-b75b-1554d5b3670e" (UID: "ba9805f8-74cf-4618-b75b-1554d5b3670e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.925440 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba9805f8-74cf-4618-b75b-1554d5b3670e-kube-api-access-g6tmx" (OuterVolumeSpecName: "kube-api-access-g6tmx") pod "ba9805f8-74cf-4618-b75b-1554d5b3670e" (UID: "ba9805f8-74cf-4618-b75b-1554d5b3670e"). InnerVolumeSpecName "kube-api-access-g6tmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.936852 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba9805f8-74cf-4618-b75b-1554d5b3670e" (UID: "ba9805f8-74cf-4618-b75b-1554d5b3670e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.939173 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "ba9805f8-74cf-4618-b75b-1554d5b3670e" (UID: "ba9805f8-74cf-4618-b75b-1554d5b3670e"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:51 crc kubenswrapper[4818]: I0930 17:20:51.952203 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-config-data" (OuterVolumeSpecName: "config-data") pod "ba9805f8-74cf-4618-b75b-1554d5b3670e" (UID: "ba9805f8-74cf-4618-b75b-1554d5b3670e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.017508 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.017542 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.017550 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9805f8-74cf-4618-b75b-1554d5b3670e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.017560 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6tmx\" (UniqueName: \"kubernetes.io/projected/ba9805f8-74cf-4618-b75b-1554d5b3670e-kube-api-access-g6tmx\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.017571 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba9805f8-74cf-4618-b75b-1554d5b3670e-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.031277 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e0c0455-bde8-4dbe-8184-1052460b33ff" path="/var/lib/kubelet/pods/0e0c0455-bde8-4dbe-8184-1052460b33ff/volumes" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.031836 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" path="/var/lib/kubelet/pods/d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f/volumes" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.347319 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.147:9322/\": read tcp 10.217.0.2:47956->10.217.0.147:9322: read: connection reset by peer" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.348430 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.147:9322/\": dial tcp 10.217.0.147:9322: connect: connection refused" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.465555 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.525681 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-combined-ca-bundle\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.525802 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-log-httpd\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.525863 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-scripts\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.525976 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-config-data\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.526287 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-sg-core-conf-yaml\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.526365 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-ceilometer-tls-certs\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.526402 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-run-httpd\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.526454 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj9xl\" (UniqueName: \"kubernetes.io/projected/38a58cc6-4709-446f-940d-77fcba122de6-kube-api-access-tj9xl\") pod \"38a58cc6-4709-446f-940d-77fcba122de6\" (UID: \"38a58cc6-4709-446f-940d-77fcba122de6\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.528504 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.528690 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.531835 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38a58cc6-4709-446f-940d-77fcba122de6-kube-api-access-tj9xl" (OuterVolumeSpecName: "kube-api-access-tj9xl") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "kube-api-access-tj9xl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.532093 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-scripts" (OuterVolumeSpecName: "scripts") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.561883 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.584105 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.595886 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.595981 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596035 4818 generic.go:334] "Generic (PLEG): container finished" podID="38a58cc6-4709-446f-940d-77fcba122de6" containerID="cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5" exitCode=0
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596069 4818 generic.go:334] "Generic (PLEG): container finished" podID="38a58cc6-4709-446f-940d-77fcba122de6" containerID="7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d" exitCode=2
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596094 4818 generic.go:334] "Generic (PLEG): container finished" podID="38a58cc6-4709-446f-940d-77fcba122de6" containerID="7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279" exitCode=0
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596096 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerDied","Data":"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5"}
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596137 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerDied","Data":"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d"}
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596152 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerDied","Data":"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279"}
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596166 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerDied","Data":"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a"}
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596106 4818 generic.go:334] "Generic (PLEG): container finished" podID="38a58cc6-4709-446f-940d-77fcba122de6" containerID="0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a" exitCode=0
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596147 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596237 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38a58cc6-4709-446f-940d-77fcba122de6","Type":"ContainerDied","Data":"26728eeea397bc668212de54e2585d0d187c135e22f7379fd915a88cf5b89cc9"}
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.596232 4818 scope.go:117] "RemoveContainer" containerID="cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.598027 4818 generic.go:334] "Generic (PLEG): container finished" podID="31363877-c2a7-4403-8e5e-c533af995ec8" containerID="1a46edcb8c5bb87dc4ed7e2977317ed2a2bbeb86d853ca4665bd23c0f8ec3b73" exitCode=0
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.598072 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"31363877-c2a7-4403-8e5e-c533af995ec8","Type":"ContainerDied","Data":"1a46edcb8c5bb87dc4ed7e2977317ed2a2bbeb86d853ca4665bd23c0f8ec3b73"}
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.600970 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.600908 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ba9805f8-74cf-4618-b75b-1554d5b3670e","Type":"ContainerDied","Data":"eb9a41054ef6c2200358517e8f15434ff6549d5bcc4ba44813e56a63bc7631de"}
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.612193 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.624219 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.626006 4818 scope.go:117] "RemoveContainer" containerID="7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.628067 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.628149 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.628206 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.628258 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.631114 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38a58cc6-4709-446f-940d-77fcba122de6-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.631139 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj9xl\" (UniqueName: \"kubernetes.io/projected/38a58cc6-4709-446f-940d-77fcba122de6-kube-api-access-tj9xl\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.631148 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.632116 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.634529 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-config-data" (OuterVolumeSpecName: "config-data") pod "38a58cc6-4709-446f-940d-77fcba122de6" (UID: "38a58cc6-4709-446f-940d-77fcba122de6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.656335 4818 scope.go:117] "RemoveContainer" containerID="7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.671351 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.675887 4818 scope.go:117] "RemoveContainer" containerID="0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a"
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732007 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clgnt\" (UniqueName: \"kubernetes.io/projected/31363877-c2a7-4403-8e5e-c533af995ec8-kube-api-access-clgnt\") pod \"31363877-c2a7-4403-8e5e-c533af995ec8\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732070 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-combined-ca-bundle\") pod \"31363877-c2a7-4403-8e5e-c533af995ec8\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732122 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31363877-c2a7-4403-8e5e-c533af995ec8-logs\") pod \"31363877-c2a7-4403-8e5e-c533af995ec8\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732187 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-internal-tls-certs\") pod \"31363877-c2a7-4403-8e5e-c533af995ec8\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732270 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-public-tls-certs\") pod \"31363877-c2a7-4403-8e5e-c533af995ec8\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732297 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-custom-prometheus-ca\") pod \"31363877-c2a7-4403-8e5e-c533af995ec8\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732356 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data\") pod \"31363877-c2a7-4403-8e5e-c533af995ec8\" (UID: \"31363877-c2a7-4403-8e5e-c533af995ec8\") "
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.732887 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38a58cc6-4709-446f-940d-77fcba122de6-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.735286 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31363877-c2a7-4403-8e5e-c533af995ec8-logs" (OuterVolumeSpecName: "logs") pod "31363877-c2a7-4403-8e5e-c533af995ec8" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.739110 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31363877-c2a7-4403-8e5e-c533af995ec8-kube-api-access-clgnt" (OuterVolumeSpecName: "kube-api-access-clgnt") pod "31363877-c2a7-4403-8e5e-c533af995ec8" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8"). InnerVolumeSpecName "kube-api-access-clgnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.762177 4818 scope.go:117] "RemoveContainer" containerID="cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.763207 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": container with ID starting with cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5 not found: ID does not exist" containerID="cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763237 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5"} err="failed to get container status \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": rpc error: code = NotFound desc = could not find container \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": container with ID starting with cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763257 4818 scope.go:117] "RemoveContainer" containerID="7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.763452 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": container with ID starting with 7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d not found: ID does not exist" containerID="7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763469 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d"} err="failed to get container status \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": rpc error: code = NotFound desc = could not find container \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": container with ID starting with 7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763481 4818 scope.go:117] "RemoveContainer" containerID="7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.763620 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": container with ID starting with 7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279 not found: ID does not 
exist" containerID="7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763636 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279"} err="failed to get container status \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": rpc error: code = NotFound desc = could not find container \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": container with ID starting with 7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763646 4818 scope.go:117] "RemoveContainer" containerID="0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.763794 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": container with ID starting with 0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a not found: ID does not exist" containerID="0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763812 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a"} err="failed to get container status \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": rpc error: code = NotFound desc = could not find container \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": container with ID starting with 0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763824 4818 scope.go:117] "RemoveContainer" containerID="cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.763986 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5"} err="failed to get container status \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": rpc error: code = NotFound desc = could not find container \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": container with ID starting with cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.764009 4818 scope.go:117] "RemoveContainer" containerID="7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.764668 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d"} err="failed to get container status \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": rpc error: code = NotFound desc = could not find container \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": container with ID starting with 7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.764700 4818 scope.go:117] 
"RemoveContainer" containerID="7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.764896 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279"} err="failed to get container status \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": rpc error: code = NotFound desc = could not find container \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": container with ID starting with 7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.764934 4818 scope.go:117] "RemoveContainer" containerID="0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765116 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a"} err="failed to get container status \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": rpc error: code = NotFound desc = could not find container \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": container with ID starting with 0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765138 4818 scope.go:117] "RemoveContainer" containerID="cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765344 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5"} err="failed to get container status \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": rpc error: code = NotFound desc = could not find container \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": container with ID starting with cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765367 4818 scope.go:117] "RemoveContainer" containerID="7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765531 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d"} err="failed to get container status \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": rpc error: code = NotFound desc = could not find container \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": container with ID starting with 7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765551 4818 scope.go:117] "RemoveContainer" containerID="7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765702 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279"} err="failed to get container status \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": rpc error: code = NotFound desc 
= could not find container \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": container with ID starting with 7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765725 4818 scope.go:117] "RemoveContainer" containerID="0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765878 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a"} err="failed to get container status \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": rpc error: code = NotFound desc = could not find container \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": container with ID starting with 0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.765899 4818 scope.go:117] "RemoveContainer" containerID="cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766080 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5"} err="failed to get container status \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": rpc error: code = NotFound desc = could not find container \"cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5\": container with ID starting with cdc63de300b01c8682e3815d8f0df79276ab311889ff78a7ae4502dd6b3bc1d5 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766101 4818 scope.go:117] "RemoveContainer" containerID="7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766296 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d"} err="failed to get container status \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": rpc error: code = NotFound desc = could not find container \"7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d\": container with ID starting with 7de840a610fcc820dceaca0c67b7cd5d7ac15bbb979d5fa9c2cfa45d3e26d44d not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766320 4818 scope.go:117] "RemoveContainer" containerID="7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766618 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279"} err="failed to get container status \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": rpc error: code = NotFound desc = could not find container \"7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279\": container with ID starting with 7558ce12064376e3f36c94732c3c91742d8be92271df778cb61a2306477eb279 not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766639 4818 scope.go:117] "RemoveContainer" containerID="0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766808 
4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a"} err="failed to get container status \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": rpc error: code = NotFound desc = could not find container \"0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a\": container with ID starting with 0c613a103cce4d0610480017cb343583946e59dfc2b8fd8f1f66ea169413745a not found: ID does not exist" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.766830 4818 scope.go:117] "RemoveContainer" containerID="3850059c5707f06a13c1e47d4945037b1f534d86cb566de29b9b60ffc72a7605" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.780225 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "31363877-c2a7-4403-8e5e-c533af995ec8" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.781196 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "31363877-c2a7-4403-8e5e-c533af995ec8" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.791323 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "31363877-c2a7-4403-8e5e-c533af995ec8" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.797680 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data" (OuterVolumeSpecName: "config-data") pod "31363877-c2a7-4403-8e5e-c533af995ec8" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.807288 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "31363877-c2a7-4403-8e5e-c533af995ec8" (UID: "31363877-c2a7-4403-8e5e-c533af995ec8"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.834480 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31363877-c2a7-4403-8e5e-c533af995ec8-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.834517 4818 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.834532 4818 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.834545 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.834556 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.834567 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clgnt\" (UniqueName: \"kubernetes.io/projected/31363877-c2a7-4403-8e5e-c533af995ec8-kube-api-access-clgnt\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.834578 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31363877-c2a7-4403-8e5e-c533af995ec8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.933865 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.941034 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965127 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965489 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" containerName="watcher-applier" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965501 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" containerName="watcher-applier" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965521 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9805f8-74cf-4618-b75b-1554d5b3670e" containerName="watcher-decision-engine" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965528 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9805f8-74cf-4618-b75b-1554d5b3670e" containerName="watcher-decision-engine" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965538 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-api" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965544 4818 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-api" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965552 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-notification-agent" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965559 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-notification-agent" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965570 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-kuttl-api-log" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965578 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-kuttl-api-log" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965598 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-central-agent" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965605 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-central-agent" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965619 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="sg-core" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965625 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="sg-core" Sep 30 17:20:52 crc kubenswrapper[4818]: E0930 17:20:52.965640 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="proxy-httpd" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965647 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="proxy-httpd" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965825 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-api" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965836 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-notification-agent" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965850 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba9805f8-74cf-4618-b75b-1554d5b3670e" containerName="watcher-decision-engine" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965857 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" containerName="watcher-kuttl-api-log" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965868 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="proxy-httpd" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965873 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="ceilometer-central-agent" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965883 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1fec7ef-5e7d-4da1-9ef7-f1b474d1860f" containerName="watcher-applier" Sep 30 
17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.965892 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38a58cc6-4709-446f-940d-77fcba122de6" containerName="sg-core" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.967255 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.969397 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.969480 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.970071 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:20:52 crc kubenswrapper[4818]: I0930 17:20:52.987760 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038370 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038434 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbpb4\" (UniqueName: \"kubernetes.io/projected/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-kube-api-access-dbpb4\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038496 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-config-data\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038577 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-log-httpd\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038617 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-scripts\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038637 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038658 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-run-httpd\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.038728 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.169172 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-config-data\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.169528 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-log-httpd\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.169613 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-scripts\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.169700 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.169773 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-run-httpd\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.169866 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.169963 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.170023 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-run-httpd\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc 
kubenswrapper[4818]: I0930 17:20:53.169860 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-log-httpd\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.170871 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbpb4\" (UniqueName: \"kubernetes.io/projected/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-kube-api-access-dbpb4\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.175781 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.175830 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.176352 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-scripts\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.181460 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.181726 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-config-data\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.187093 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbpb4\" (UniqueName: \"kubernetes.io/projected/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-kube-api-access-dbpb4\") pod \"ceilometer-0\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.286122 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.622882 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"31363877-c2a7-4403-8e5e-c533af995ec8","Type":"ContainerDied","Data":"f42f449776ac3197ed4fadbf6da1d7b45a72bf4636fd42baa7af3a11fe75be68"} Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.623293 4818 scope.go:117] "RemoveContainer" containerID="1a46edcb8c5bb87dc4ed7e2977317ed2a2bbeb86d853ca4665bd23c0f8ec3b73" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.622955 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.648445 4818 scope.go:117] "RemoveContainer" containerID="c205079ab1611c270eebbcbf1d91cbaaccde18460a40ec5547291e146dfff0bd" Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.658764 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.669000 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:20:53 crc kubenswrapper[4818]: I0930 17:20:53.758125 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.034821 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31363877-c2a7-4403-8e5e-c533af995ec8" path="/var/lib/kubelet/pods/31363877-c2a7-4403-8e5e-c533af995ec8/volumes" Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.035762 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38a58cc6-4709-446f-940d-77fcba122de6" path="/var/lib/kubelet/pods/38a58cc6-4709-446f-940d-77fcba122de6/volumes" Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.036881 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba9805f8-74cf-4618-b75b-1554d5b3670e" path="/var/lib/kubelet/pods/ba9805f8-74cf-4618-b75b-1554d5b3670e/volumes" Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.297650 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-q9mdl"] Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.298580 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q9mdl" Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.328382 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q9mdl"] Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.392546 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82hj8\" (UniqueName: \"kubernetes.io/projected/b760ae74-7c53-479c-ba5b-32eded8b3f72-kube-api-access-82hj8\") pod \"watcher-db-create-q9mdl\" (UID: \"b760ae74-7c53-479c-ba5b-32eded8b3f72\") " pod="watcher-kuttl-default/watcher-db-create-q9mdl" Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.495744 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82hj8\" (UniqueName: \"kubernetes.io/projected/b760ae74-7c53-479c-ba5b-32eded8b3f72-kube-api-access-82hj8\") pod \"watcher-db-create-q9mdl\" (UID: \"b760ae74-7c53-479c-ba5b-32eded8b3f72\") " pod="watcher-kuttl-default/watcher-db-create-q9mdl" Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.515480 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82hj8\" (UniqueName: \"kubernetes.io/projected/b760ae74-7c53-479c-ba5b-32eded8b3f72-kube-api-access-82hj8\") pod \"watcher-db-create-q9mdl\" (UID: \"b760ae74-7c53-479c-ba5b-32eded8b3f72\") " pod="watcher-kuttl-default/watcher-db-create-q9mdl" Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.632491 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerStarted","Data":"b07b85ad5459ed0fa5591ff246739e766e015ff6cdca5480094fa97ee20a27e0"} Sep 30 17:20:54 crc kubenswrapper[4818]: I0930 17:20:54.672535 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q9mdl" Sep 30 17:20:55 crc kubenswrapper[4818]: I0930 17:20:55.162630 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q9mdl"] Sep 30 17:20:55 crc kubenswrapper[4818]: W0930 17:20:55.190029 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb760ae74_7c53_479c_ba5b_32eded8b3f72.slice/crio-1522f3ed9f345d8109a3d04f4beeffa25421122dca39c765193da4b6bd433e48 WatchSource:0}: Error finding container 1522f3ed9f345d8109a3d04f4beeffa25421122dca39c765193da4b6bd433e48: Status 404 returned error can't find the container with id 1522f3ed9f345d8109a3d04f4beeffa25421122dca39c765193da4b6bd433e48 Sep 30 17:20:55 crc kubenswrapper[4818]: I0930 17:20:55.642189 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerStarted","Data":"0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1"} Sep 30 17:20:55 crc kubenswrapper[4818]: I0930 17:20:55.642514 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerStarted","Data":"490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d"} Sep 30 17:20:55 crc kubenswrapper[4818]: I0930 17:20:55.644007 4818 generic.go:334] "Generic (PLEG): container finished" podID="b760ae74-7c53-479c-ba5b-32eded8b3f72" containerID="419db01ba387a3cd4de9cfa9061c88c02c19f6944f32028949ecfc1cef57fd4d" exitCode=0 Sep 30 17:20:55 crc kubenswrapper[4818]: I0930 17:20:55.644050 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-q9mdl" event={"ID":"b760ae74-7c53-479c-ba5b-32eded8b3f72","Type":"ContainerDied","Data":"419db01ba387a3cd4de9cfa9061c88c02c19f6944f32028949ecfc1cef57fd4d"} Sep 30 17:20:55 crc kubenswrapper[4818]: I0930 17:20:55.644086 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-q9mdl" event={"ID":"b760ae74-7c53-479c-ba5b-32eded8b3f72","Type":"ContainerStarted","Data":"1522f3ed9f345d8109a3d04f4beeffa25421122dca39c765193da4b6bd433e48"} Sep 30 17:20:56 crc kubenswrapper[4818]: I0930 17:20:56.654797 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerStarted","Data":"4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311"} Sep 30 17:20:57 crc kubenswrapper[4818]: I0930 17:20:57.082406 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q9mdl" Sep 30 17:20:57 crc kubenswrapper[4818]: I0930 17:20:57.166286 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82hj8\" (UniqueName: \"kubernetes.io/projected/b760ae74-7c53-479c-ba5b-32eded8b3f72-kube-api-access-82hj8\") pod \"b760ae74-7c53-479c-ba5b-32eded8b3f72\" (UID: \"b760ae74-7c53-479c-ba5b-32eded8b3f72\") " Sep 30 17:20:57 crc kubenswrapper[4818]: I0930 17:20:57.170898 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b760ae74-7c53-479c-ba5b-32eded8b3f72-kube-api-access-82hj8" (OuterVolumeSpecName: "kube-api-access-82hj8") pod "b760ae74-7c53-479c-ba5b-32eded8b3f72" (UID: "b760ae74-7c53-479c-ba5b-32eded8b3f72"). 
InnerVolumeSpecName "kube-api-access-82hj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:20:57 crc kubenswrapper[4818]: I0930 17:20:57.268207 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82hj8\" (UniqueName: \"kubernetes.io/projected/b760ae74-7c53-479c-ba5b-32eded8b3f72-kube-api-access-82hj8\") on node \"crc\" DevicePath \"\"" Sep 30 17:20:57 crc kubenswrapper[4818]: I0930 17:20:57.668390 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-q9mdl" event={"ID":"b760ae74-7c53-479c-ba5b-32eded8b3f72","Type":"ContainerDied","Data":"1522f3ed9f345d8109a3d04f4beeffa25421122dca39c765193da4b6bd433e48"} Sep 30 17:20:57 crc kubenswrapper[4818]: I0930 17:20:57.668434 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1522f3ed9f345d8109a3d04f4beeffa25421122dca39c765193da4b6bd433e48" Sep 30 17:20:57 crc kubenswrapper[4818]: I0930 17:20:57.668465 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q9mdl" Sep 30 17:20:58 crc kubenswrapper[4818]: I0930 17:20:58.680299 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerStarted","Data":"26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09"} Sep 30 17:20:58 crc kubenswrapper[4818]: I0930 17:20:58.681289 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:20:58 crc kubenswrapper[4818]: I0930 17:20:58.710335 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.70574783 podStartE2EDuration="6.710315274s" podCreationTimestamp="2025-09-30 17:20:52 +0000 UTC" firstStartedPulling="2025-09-30 17:20:53.770713655 +0000 UTC m=+1300.524985471" lastFinishedPulling="2025-09-30 17:20:57.775281089 +0000 UTC m=+1304.529552915" observedRunningTime="2025-09-30 17:20:58.706607784 +0000 UTC m=+1305.460879610" watchObservedRunningTime="2025-09-30 17:20:58.710315274 +0000 UTC m=+1305.464587100" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.330905 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-490f-account-create-69fj5"] Sep 30 17:21:04 crc kubenswrapper[4818]: E0930 17:21:04.332439 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b760ae74-7c53-479c-ba5b-32eded8b3f72" containerName="mariadb-database-create" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.332466 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="b760ae74-7c53-479c-ba5b-32eded8b3f72" containerName="mariadb-database-create" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.333180 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="b760ae74-7c53-479c-ba5b-32eded8b3f72" containerName="mariadb-database-create" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.334329 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.339155 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.344556 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-490f-account-create-69fj5"] Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.379479 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7flh\" (UniqueName: \"kubernetes.io/projected/107dc282-5a61-4890-ba8b-d3251cb7edab-kube-api-access-d7flh\") pod \"watcher-490f-account-create-69fj5\" (UID: \"107dc282-5a61-4890-ba8b-d3251cb7edab\") " pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.481350 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7flh\" (UniqueName: \"kubernetes.io/projected/107dc282-5a61-4890-ba8b-d3251cb7edab-kube-api-access-d7flh\") pod \"watcher-490f-account-create-69fj5\" (UID: \"107dc282-5a61-4890-ba8b-d3251cb7edab\") " pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.527001 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7flh\" (UniqueName: \"kubernetes.io/projected/107dc282-5a61-4890-ba8b-d3251cb7edab-kube-api-access-d7flh\") pod \"watcher-490f-account-create-69fj5\" (UID: \"107dc282-5a61-4890-ba8b-d3251cb7edab\") " pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" Sep 30 17:21:04 crc kubenswrapper[4818]: I0930 17:21:04.673574 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" Sep 30 17:21:05 crc kubenswrapper[4818]: I0930 17:21:05.192993 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-490f-account-create-69fj5"] Sep 30 17:21:05 crc kubenswrapper[4818]: E0930 17:21:05.552873 4818 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod107dc282_5a61_4890_ba8b_d3251cb7edab.slice/crio-conmon-901d3131056e4ad8f7198b406c9332ccc9b826cec978732ee5fe607166a8a5cd.scope\": RecentStats: unable to find data in memory cache]" Sep 30 17:21:05 crc kubenswrapper[4818]: I0930 17:21:05.765953 4818 generic.go:334] "Generic (PLEG): container finished" podID="107dc282-5a61-4890-ba8b-d3251cb7edab" containerID="901d3131056e4ad8f7198b406c9332ccc9b826cec978732ee5fe607166a8a5cd" exitCode=0 Sep 30 17:21:05 crc kubenswrapper[4818]: I0930 17:21:05.766016 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" event={"ID":"107dc282-5a61-4890-ba8b-d3251cb7edab","Type":"ContainerDied","Data":"901d3131056e4ad8f7198b406c9332ccc9b826cec978732ee5fe607166a8a5cd"} Sep 30 17:21:05 crc kubenswrapper[4818]: I0930 17:21:05.766047 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" event={"ID":"107dc282-5a61-4890-ba8b-d3251cb7edab","Type":"ContainerStarted","Data":"3cb8a6a66ddeba8c76cb7fa6c7a6e6e6e9c8d6d9da582b52b01400d68023a264"} Sep 30 17:21:07 crc kubenswrapper[4818]: I0930 17:21:07.254357 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" Sep 30 17:21:07 crc kubenswrapper[4818]: I0930 17:21:07.329594 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7flh\" (UniqueName: \"kubernetes.io/projected/107dc282-5a61-4890-ba8b-d3251cb7edab-kube-api-access-d7flh\") pod \"107dc282-5a61-4890-ba8b-d3251cb7edab\" (UID: \"107dc282-5a61-4890-ba8b-d3251cb7edab\") " Sep 30 17:21:07 crc kubenswrapper[4818]: I0930 17:21:07.336288 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/107dc282-5a61-4890-ba8b-d3251cb7edab-kube-api-access-d7flh" (OuterVolumeSpecName: "kube-api-access-d7flh") pod "107dc282-5a61-4890-ba8b-d3251cb7edab" (UID: "107dc282-5a61-4890-ba8b-d3251cb7edab"). InnerVolumeSpecName "kube-api-access-d7flh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:07 crc kubenswrapper[4818]: I0930 17:21:07.431622 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7flh\" (UniqueName: \"kubernetes.io/projected/107dc282-5a61-4890-ba8b-d3251cb7edab-kube-api-access-d7flh\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:07 crc kubenswrapper[4818]: I0930 17:21:07.785667 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" event={"ID":"107dc282-5a61-4890-ba8b-d3251cb7edab","Type":"ContainerDied","Data":"3cb8a6a66ddeba8c76cb7fa6c7a6e6e6e9c8d6d9da582b52b01400d68023a264"} Sep 30 17:21:07 crc kubenswrapper[4818]: I0930 17:21:07.785711 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cb8a6a66ddeba8c76cb7fa6c7a6e6e6e9c8d6d9da582b52b01400d68023a264" Sep 30 17:21:07 crc kubenswrapper[4818]: I0930 17:21:07.785767 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-490f-account-create-69fj5" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.574096 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl"] Sep 30 17:21:09 crc kubenswrapper[4818]: E0930 17:21:09.574606 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="107dc282-5a61-4890-ba8b-d3251cb7edab" containerName="mariadb-account-create" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.574618 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="107dc282-5a61-4890-ba8b-d3251cb7edab" containerName="mariadb-account-create" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.574799 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="107dc282-5a61-4890-ba8b-d3251cb7edab" containerName="mariadb-account-create" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.575298 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.586326 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.586450 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-6c5s6" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.610611 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl"] Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.765729 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-config-data\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.766309 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmdzk\" (UniqueName: \"kubernetes.io/projected/44d49fb6-4def-4dfc-8550-f2d219c0ef64-kube-api-access-pmdzk\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.766384 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-db-sync-config-data\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.766467 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.868070 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.868154 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-config-data\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.868206 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmdzk\" (UniqueName: \"kubernetes.io/projected/44d49fb6-4def-4dfc-8550-f2d219c0ef64-kube-api-access-pmdzk\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc 
kubenswrapper[4818]: I0930 17:21:09.869216 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-db-sync-config-data\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.873422 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.873882 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-config-data\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.875624 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-db-sync-config-data\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.897839 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmdzk\" (UniqueName: \"kubernetes.io/projected/44d49fb6-4def-4dfc-8550-f2d219c0ef64-kube-api-access-pmdzk\") pod \"watcher-kuttl-db-sync-vw4zl\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:09 crc kubenswrapper[4818]: I0930 17:21:09.911945 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:10 crc kubenswrapper[4818]: I0930 17:21:10.382579 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl"] Sep 30 17:21:10 crc kubenswrapper[4818]: W0930 17:21:10.391155 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44d49fb6_4def_4dfc_8550_f2d219c0ef64.slice/crio-d06a25bd5d32c0c4573682759bbe1bd27bf20a00d5ea02f34b735ca937ce840d WatchSource:0}: Error finding container d06a25bd5d32c0c4573682759bbe1bd27bf20a00d5ea02f34b735ca937ce840d: Status 404 returned error can't find the container with id d06a25bd5d32c0c4573682759bbe1bd27bf20a00d5ea02f34b735ca937ce840d Sep 30 17:21:10 crc kubenswrapper[4818]: I0930 17:21:10.811538 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" event={"ID":"44d49fb6-4def-4dfc-8550-f2d219c0ef64","Type":"ContainerStarted","Data":"42b98b75e653a76209ca45f43fa8d2cc49b3c58d6cc238a15a499d9ca8978f8c"} Sep 30 17:21:10 crc kubenswrapper[4818]: I0930 17:21:10.811806 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" event={"ID":"44d49fb6-4def-4dfc-8550-f2d219c0ef64","Type":"ContainerStarted","Data":"d06a25bd5d32c0c4573682759bbe1bd27bf20a00d5ea02f34b735ca937ce840d"} Sep 30 17:21:10 crc kubenswrapper[4818]: I0930 17:21:10.834017 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" podStartSLOduration=1.8339981220000001 podStartE2EDuration="1.833998122s" podCreationTimestamp="2025-09-30 17:21:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:10.826437808 +0000 UTC m=+1317.580709634" watchObservedRunningTime="2025-09-30 17:21:10.833998122 +0000 UTC m=+1317.588269958" Sep 30 17:21:13 crc kubenswrapper[4818]: I0930 17:21:13.838619 4818 generic.go:334] "Generic (PLEG): container finished" podID="44d49fb6-4def-4dfc-8550-f2d219c0ef64" containerID="42b98b75e653a76209ca45f43fa8d2cc49b3c58d6cc238a15a499d9ca8978f8c" exitCode=0 Sep 30 17:21:13 crc kubenswrapper[4818]: I0930 17:21:13.838979 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" event={"ID":"44d49fb6-4def-4dfc-8550-f2d219c0ef64","Type":"ContainerDied","Data":"42b98b75e653a76209ca45f43fa8d2cc49b3c58d6cc238a15a499d9ca8978f8c"} Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.294950 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.463015 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-combined-ca-bundle\") pod \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.463149 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-db-sync-config-data\") pod \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.463196 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmdzk\" (UniqueName: \"kubernetes.io/projected/44d49fb6-4def-4dfc-8550-f2d219c0ef64-kube-api-access-pmdzk\") pod \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.463259 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-config-data\") pod \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\" (UID: \"44d49fb6-4def-4dfc-8550-f2d219c0ef64\") " Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.467810 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44d49fb6-4def-4dfc-8550-f2d219c0ef64-kube-api-access-pmdzk" (OuterVolumeSpecName: "kube-api-access-pmdzk") pod "44d49fb6-4def-4dfc-8550-f2d219c0ef64" (UID: "44d49fb6-4def-4dfc-8550-f2d219c0ef64"). InnerVolumeSpecName "kube-api-access-pmdzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.485400 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "44d49fb6-4def-4dfc-8550-f2d219c0ef64" (UID: "44d49fb6-4def-4dfc-8550-f2d219c0ef64"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.493069 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44d49fb6-4def-4dfc-8550-f2d219c0ef64" (UID: "44d49fb6-4def-4dfc-8550-f2d219c0ef64"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.510854 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-config-data" (OuterVolumeSpecName: "config-data") pod "44d49fb6-4def-4dfc-8550-f2d219c0ef64" (UID: "44d49fb6-4def-4dfc-8550-f2d219c0ef64"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.565179 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.565222 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.565235 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmdzk\" (UniqueName: \"kubernetes.io/projected/44d49fb6-4def-4dfc-8550-f2d219c0ef64-kube-api-access-pmdzk\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.565250 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44d49fb6-4def-4dfc-8550-f2d219c0ef64-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.860552 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" event={"ID":"44d49fb6-4def-4dfc-8550-f2d219c0ef64","Type":"ContainerDied","Data":"d06a25bd5d32c0c4573682759bbe1bd27bf20a00d5ea02f34b735ca937ce840d"} Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.860599 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d06a25bd5d32c0c4573682759bbe1bd27bf20a00d5ea02f34b735ca937ce840d" Sep 30 17:21:15 crc kubenswrapper[4818]: I0930 17:21:15.860644 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.208441 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:16 crc kubenswrapper[4818]: E0930 17:21:16.208791 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44d49fb6-4def-4dfc-8550-f2d219c0ef64" containerName="watcher-kuttl-db-sync" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.208806 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="44d49fb6-4def-4dfc-8550-f2d219c0ef64" containerName="watcher-kuttl-db-sync" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.208971 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="44d49fb6-4def-4dfc-8550-f2d219c0ef64" containerName="watcher-kuttl-db-sync" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.209793 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.212479 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-6c5s6" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.212822 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.213119 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.213247 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.226742 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.236146 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.237167 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.247261 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.256022 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285736 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285789 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285816 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30127e4a-714f-4302-ba54-90ee862d760a-logs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285833 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285866 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285886 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvqkc\" (UniqueName: \"kubernetes.io/projected/30127e4a-714f-4302-ba54-90ee862d760a-kube-api-access-bvqkc\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285906 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsnnt\" (UniqueName: \"kubernetes.io/projected/42776a44-2347-4224-8f79-82081d8962b7-kube-api-access-fsnnt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285958 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.285983 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.286010 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.286035 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42776a44-2347-4224-8f79-82081d8962b7-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.286067 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.297183 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.299104 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.306180 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.306679 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387189 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387245 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387274 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d794284-d473-467e-acdd-d14a44d28fb1-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387299 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30127e4a-714f-4302-ba54-90ee862d760a-logs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387318 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387348 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25c89\" (UniqueName: \"kubernetes.io/projected/4d794284-d473-467e-acdd-d14a44d28fb1-kube-api-access-25c89\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387370 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387393 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvqkc\" (UniqueName: \"kubernetes.io/projected/30127e4a-714f-4302-ba54-90ee862d760a-kube-api-access-bvqkc\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387420 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsnnt\" (UniqueName: \"kubernetes.io/projected/42776a44-2347-4224-8f79-82081d8962b7-kube-api-access-fsnnt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387594 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387630 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387654 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387681 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42776a44-2347-4224-8f79-82081d8962b7-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387715 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387737 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387768 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.387792 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30127e4a-714f-4302-ba54-90ee862d760a-logs\") pod \"watcher-kuttl-api-0\" (UID: 
\"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.389630 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42776a44-2347-4224-8f79-82081d8962b7-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.392548 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.396043 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.396524 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.396525 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.396956 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.397531 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.402183 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.403999 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvqkc\" (UniqueName: \"kubernetes.io/projected/30127e4a-714f-4302-ba54-90ee862d760a-kube-api-access-bvqkc\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.414532 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsnnt\" (UniqueName: \"kubernetes.io/projected/42776a44-2347-4224-8f79-82081d8962b7-kube-api-access-fsnnt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.421478 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.489361 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.489407 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.489450 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d794284-d473-467e-acdd-d14a44d28fb1-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.489487 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25c89\" (UniqueName: \"kubernetes.io/projected/4d794284-d473-467e-acdd-d14a44d28fb1-kube-api-access-25c89\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.490022 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d794284-d473-467e-acdd-d14a44d28fb1-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.492686 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.493299 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:16 crc 
Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.534475 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.549687 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.616457 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:21:16 crc kubenswrapper[4818]: I0930 17:21:16.997769 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:21:17 crc kubenswrapper[4818]: W0930 17:21:17.007712 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30127e4a_714f_4302_ba54_90ee862d760a.slice/crio-4ed9e1663354b73d79d80dcba4699fe414b1d73642b67bbfef4d3f05dd9f9d56 WatchSource:0}: Error finding container 4ed9e1663354b73d79d80dcba4699fe414b1d73642b67bbfef4d3f05dd9f9d56: Status 404 returned error can't find the container with id 4ed9e1663354b73d79d80dcba4699fe414b1d73642b67bbfef4d3f05dd9f9d56
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.070224 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.137992 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:21:17 crc kubenswrapper[4818]: W0930 17:21:17.166633 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d794284_d473_467e_acdd_d14a44d28fb1.slice/crio-fff43a38149a963968fce095ef91b0b80d0258d2205faeece4df8fd540c1eac8 WatchSource:0}: Error finding container fff43a38149a963968fce095ef91b0b80d0258d2205faeece4df8fd540c1eac8: Status 404 returned error can't find the container with id fff43a38149a963968fce095ef91b0b80d0258d2205faeece4df8fd540c1eac8
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.879702 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4d794284-d473-467e-acdd-d14a44d28fb1","Type":"ContainerStarted","Data":"c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791"}
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.879752 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4d794284-d473-467e-acdd-d14a44d28fb1","Type":"ContainerStarted","Data":"fff43a38149a963968fce095ef91b0b80d0258d2205faeece4df8fd540c1eac8"}
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.881711 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"42776a44-2347-4224-8f79-82081d8962b7","Type":"ContainerStarted","Data":"14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d"}
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.881776 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"42776a44-2347-4224-8f79-82081d8962b7","Type":"ContainerStarted","Data":"2d807319d6e1a5b66120cd9f815c4760e6d5eeabac912ca1278af577b37e9699"}
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.883983 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"30127e4a-714f-4302-ba54-90ee862d760a","Type":"ContainerStarted","Data":"57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29"}
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.884033 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"30127e4a-714f-4302-ba54-90ee862d760a","Type":"ContainerStarted","Data":"76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3"}
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.884052 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"30127e4a-714f-4302-ba54-90ee862d760a","Type":"ContainerStarted","Data":"4ed9e1663354b73d79d80dcba4699fe414b1d73642b67bbfef4d3f05dd9f9d56"}
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.884223 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.898511 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=1.8984987370000002 podStartE2EDuration="1.898498737s" podCreationTimestamp="2025-09-30 17:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:17.89600917 +0000 UTC m=+1324.650281026" watchObservedRunningTime="2025-09-30 17:21:17.898498737 +0000 UTC m=+1324.652770553"
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.920905 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=1.920887952 podStartE2EDuration="1.920887952s" podCreationTimestamp="2025-09-30 17:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:17.917668585 +0000 UTC m=+1324.671940401" watchObservedRunningTime="2025-09-30 17:21:17.920887952 +0000 UTC m=+1324.675159768"
Sep 30 17:21:17 crc kubenswrapper[4818]: I0930 17:21:17.948063 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=1.948038146 podStartE2EDuration="1.948038146s" podCreationTimestamp="2025-09-30 17:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:17.942891667 +0000 UTC m=+1324.697163483" watchObservedRunningTime="2025-09-30 17:21:17.948038146 +0000 UTC m=+1324.702310002"
Sep 30 17:21:20 crc kubenswrapper[4818]: I0930 17:21:20.267492 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:21 crc kubenswrapper[4818]: I0930 17:21:21.535131 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
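
[editor's note] In these tracker lines, podStartE2EDuration is just watchObservedRunningTime minus podCreationTimestamp (no images were pulled, so the pull window is the zero time). Recomputing the applier-0 figure from the logged timestamps, with the monotonic "m=+..." suffix dropped since time.Parse does not accept it:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Layout matching Go's default time.Time formatting used in the log.
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	created, _ := time.Parse(layout, "2025-09-30 17:21:16 +0000 UTC")
    	observed, _ := time.Parse(layout, "2025-09-30 17:21:17.898498737 +0000 UTC")
    	fmt.Println(observed.Sub(created)) // 1.898498737s, the logged podStartE2EDuration
    }
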
Sep 30 17:21:21 crc kubenswrapper[4818]: I0930 17:21:21.617451 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.596111 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.596354 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.596396 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss"
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.597059 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ae5094dfd804c3f512a41e1f23be19d77cd5136dc31ac2ab100aaebcb668c7b1"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.597110 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://ae5094dfd804c3f512a41e1f23be19d77cd5136dc31ac2ab100aaebcb668c7b1" gracePeriod=600
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.929760 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="ae5094dfd804c3f512a41e1f23be19d77cd5136dc31ac2ab100aaebcb668c7b1" exitCode=0
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.929950 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"ae5094dfd804c3f512a41e1f23be19d77cd5136dc31ac2ab100aaebcb668c7b1"}
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.930034 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233"}
Sep 30 17:21:22 crc kubenswrapper[4818]: I0930 17:21:22.930069 4818 scope.go:117] "RemoveContainer" containerID="8f6686d61e096db5e2902b7d245395d3a5ea7e0fa983b9dcf9c5710b1f2ecad9"
Sep 30 17:21:23 crc kubenswrapper[4818]: I0930 17:21:23.399434 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.535410 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.547876 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.549981 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.586874 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.617679 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.643672 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.976906 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:21:26 crc kubenswrapper[4818]: I0930 17:21:26.989240 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:27 crc kubenswrapper[4818]: I0930 17:21:27.013224 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:21:27 crc kubenswrapper[4818]: I0930 17:21:27.027363 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:21:29 crc kubenswrapper[4818]: I0930 17:21:29.050875 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:21:29 crc kubenswrapper[4818]: I0930 17:21:29.052679 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="ceilometer-central-agent" containerID="cri-o://0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1" gracePeriod=30
Sep 30 17:21:29 crc kubenswrapper[4818]: I0930 17:21:29.052791 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="ceilometer-notification-agent" containerID="cri-o://490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d" gracePeriod=30
Sep 30 17:21:29 crc kubenswrapper[4818]: I0930 17:21:29.052769 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="proxy-httpd" containerID="cri-o://26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09" gracePeriod=30
Sep 30 17:21:29 crc kubenswrapper[4818]: I0930 17:21:29.052747 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="sg-core" containerID="cri-o://4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311" gracePeriod=30
Sep 30 17:21:30 crc kubenswrapper[4818]: I0930 17:21:30.000277 4818 generic.go:334] "Generic (PLEG): container finished" podID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerID="26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09" exitCode=0
Sep 30 17:21:30 crc kubenswrapper[4818]: I0930 17:21:30.000307 4818 generic.go:334] "Generic (PLEG): container finished" podID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerID="4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311" exitCode=2
"Generic (PLEG): container finished" podID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerID="4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311" exitCode=2 Sep 30 17:21:30 crc kubenswrapper[4818]: I0930 17:21:30.000315 4818 generic.go:334] "Generic (PLEG): container finished" podID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerID="0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1" exitCode=0 Sep 30 17:21:30 crc kubenswrapper[4818]: I0930 17:21:30.000348 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerDied","Data":"26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09"} Sep 30 17:21:30 crc kubenswrapper[4818]: I0930 17:21:30.000405 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerDied","Data":"4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311"} Sep 30 17:21:30 crc kubenswrapper[4818]: I0930 17:21:30.000419 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerDied","Data":"0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1"} Sep 30 17:21:31 crc kubenswrapper[4818]: I0930 17:21:31.930840 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.036458 4818 generic.go:334] "Generic (PLEG): container finished" podID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerID="490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d" exitCode=0 Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.036518 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerDied","Data":"490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d"} Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.036820 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"7fd3d1d6-a87f-43e6-b28f-04bfca248cca","Type":"ContainerDied","Data":"b07b85ad5459ed0fa5591ff246739e766e015ff6cdca5480094fa97ee20a27e0"} Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.036532 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.036884 4818 scope.go:117] "RemoveContainer" containerID="26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.052794 4818 scope.go:117] "RemoveContainer" containerID="4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.068619 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-combined-ca-bundle\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.068688 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-sg-core-conf-yaml\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.068767 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-scripts\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.068809 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbpb4\" (UniqueName: \"kubernetes.io/projected/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-kube-api-access-dbpb4\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.068853 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-run-httpd\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.068880 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-config-data\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.068990 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-log-httpd\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.069017 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-ceilometer-tls-certs\") pod \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\" (UID: \"7fd3d1d6-a87f-43e6-b28f-04bfca248cca\") " Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.072000 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: 
"7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.074062 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: "7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.074393 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-kube-api-access-dbpb4" (OuterVolumeSpecName: "kube-api-access-dbpb4") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: "7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "kube-api-access-dbpb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.074769 4818 scope.go:117] "RemoveContainer" containerID="490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.077207 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-scripts" (OuterVolumeSpecName: "scripts") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: "7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.095073 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: "7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.157312 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: "7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.162521 4818 scope.go:117] "RemoveContainer" containerID="0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.165853 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: "7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.172520 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbpb4\" (UniqueName: \"kubernetes.io/projected/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-kube-api-access-dbpb4\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.172692 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.172728 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.172753 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.172769 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.172783 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.172794 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.179706 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-config-data" (OuterVolumeSpecName: "config-data") pod "7fd3d1d6-a87f-43e6-b28f-04bfca248cca" (UID: "7fd3d1d6-a87f-43e6-b28f-04bfca248cca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.182945 4818 scope.go:117] "RemoveContainer" containerID="26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09" Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.183647 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09\": container with ID starting with 26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09 not found: ID does not exist" containerID="26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.183705 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09"} err="failed to get container status \"26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09\": rpc error: code = NotFound desc = could not find container \"26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09\": container with ID starting with 26a437f3a78d869099bd08f3a0ffa72083b7c7fbb0acf36ed443e2238d3bfa09 not found: ID does not exist" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.183729 4818 scope.go:117] "RemoveContainer" containerID="4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311" Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.185175 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311\": container with ID starting with 4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311 not found: ID does not exist" containerID="4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.185217 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311"} err="failed to get container status \"4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311\": rpc error: code = NotFound desc = could not find container \"4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311\": container with ID starting with 4b523efc1a2bc6893f988fcc56a38c0de214f62be1af80bfb8788ed32d6a8311 not found: ID does not exist" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.185243 4818 scope.go:117] "RemoveContainer" containerID="490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d" Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.186099 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d\": container with ID starting with 490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d not found: ID does not exist" containerID="490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.186127 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d"} err="failed to get container status \"490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d\": rpc error: code = NotFound desc = could not 
find container \"490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d\": container with ID starting with 490d91b1384dabedd5a1540332a7bf90f2c6a450a3a4bc1f18572d6798cfe14d not found: ID does not exist" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.186140 4818 scope.go:117] "RemoveContainer" containerID="0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1" Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.186382 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1\": container with ID starting with 0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1 not found: ID does not exist" containerID="0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.186413 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1"} err="failed to get container status \"0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1\": rpc error: code = NotFound desc = could not find container \"0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1\": container with ID starting with 0737214de3b55fbeec0a5d396c45279b0073eb842cd0b61acb314e833f95f9c1 not found: ID does not exist" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.274460 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd3d1d6-a87f-43e6-b28f-04bfca248cca-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.377050 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.385903 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423065 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.423395 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="sg-core" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423412 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="sg-core" Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.423437 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="proxy-httpd" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423442 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="proxy-httpd" Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.423451 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="ceilometer-notification-agent" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423457 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="ceilometer-notification-agent" Sep 30 17:21:32 crc kubenswrapper[4818]: E0930 17:21:32.423472 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" 
containerName="ceilometer-central-agent" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423477 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="ceilometer-central-agent" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423639 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="ceilometer-notification-agent" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423659 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="sg-core" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423673 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="proxy-httpd" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.423686 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" containerName="ceilometer-central-agent" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.425253 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.429299 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.440869 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.443402 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.443676 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.582710 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5djql\" (UniqueName: \"kubernetes.io/projected/e006d103-d4af-48b0-8189-66893c558088-kube-api-access-5djql\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.582749 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-log-httpd\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.582983 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-run-httpd\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.583105 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-scripts\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.583183 4818 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.583225 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.583265 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-config-data\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.583360 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685125 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5djql\" (UniqueName: \"kubernetes.io/projected/e006d103-d4af-48b0-8189-66893c558088-kube-api-access-5djql\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685183 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-log-httpd\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685236 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-run-httpd\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685267 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-scripts\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685293 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685308 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685325 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-config-data\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.685358 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.686252 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-log-httpd\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.686364 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-run-httpd\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.689375 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-scripts\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.689484 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-config-data\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.689618 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.691051 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.697701 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.719529 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-5djql\" (UniqueName: \"kubernetes.io/projected/e006d103-d4af-48b0-8189-66893c558088-kube-api-access-5djql\") pod \"ceilometer-0\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:32 crc kubenswrapper[4818]: I0930 17:21:32.781176 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:33 crc kubenswrapper[4818]: I0930 17:21:33.220548 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:21:34 crc kubenswrapper[4818]: I0930 17:21:34.028712 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fd3d1d6-a87f-43e6-b28f-04bfca248cca" path="/var/lib/kubelet/pods/7fd3d1d6-a87f-43e6-b28f-04bfca248cca/volumes" Sep 30 17:21:34 crc kubenswrapper[4818]: I0930 17:21:34.066084 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerStarted","Data":"f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f"} Sep 30 17:21:34 crc kubenswrapper[4818]: I0930 17:21:34.066163 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerStarted","Data":"8d32c88c05a6faab0786c8079fee9c840a77fa19bc3b7fda6773a4172077e4bc"} Sep 30 17:21:35 crc kubenswrapper[4818]: I0930 17:21:35.073967 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerStarted","Data":"c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf"} Sep 30 17:21:36 crc kubenswrapper[4818]: I0930 17:21:36.123639 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerStarted","Data":"f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6"} Sep 30 17:21:37 crc kubenswrapper[4818]: I0930 17:21:37.138353 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerStarted","Data":"6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56"} Sep 30 17:21:37 crc kubenswrapper[4818]: I0930 17:21:37.139798 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:21:37 crc kubenswrapper[4818]: I0930 17:21:37.170862 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.634308111 podStartE2EDuration="5.170844625s" podCreationTimestamp="2025-09-30 17:21:32 +0000 UTC" firstStartedPulling="2025-09-30 17:21:33.230558939 +0000 UTC m=+1339.984830755" lastFinishedPulling="2025-09-30 17:21:36.767095443 +0000 UTC m=+1343.521367269" observedRunningTime="2025-09-30 17:21:37.168191874 +0000 UTC m=+1343.922463690" watchObservedRunningTime="2025-09-30 17:21:37.170844625 +0000 UTC m=+1343.925116441" Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.567899 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.568515 4818 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="42776a44-2347-4224-8f79-82081d8962b7" containerName="watcher-decision-engine" containerID="cri-o://14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d" gracePeriod=30 Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.579102 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.579305 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/memcached-0" podUID="ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" containerName="memcached" containerID="cri-o://a3df436dd4e579476af80c23e04a26cd1b2a4d56e5694dffe2507520eb42dc29" gracePeriod=30 Sep 30 17:21:40 crc kubenswrapper[4818]: E0930 17:21:40.597131 4818 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.24:58514->38.102.83.24:43897: write tcp 38.102.83.24:58514->38.102.83.24:43897: write: broken pipe Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.624527 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.624725 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="4d794284-d473-467e-acdd-d14a44d28fb1" containerName="watcher-applier" containerID="cri-o://c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" gracePeriod=30 Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.666427 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.666648 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-kuttl-api-log" containerID="cri-o://76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3" gracePeriod=30 Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.666774 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-api" containerID="cri-o://57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29" gracePeriod=30 Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.749836 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-hjctc"] Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.755500 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-hjctc"] Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.842603 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-2jn4f"] Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.844021 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.846957 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-memcached-mtls" Sep 30 17:21:40 crc kubenswrapper[4818]: I0930 17:21:40.861156 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-2jn4f"] Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.022937 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh7bj\" (UniqueName: \"kubernetes.io/projected/bfb209b6-ed72-4796-92f5-85372aeaf10c-kube-api-access-nh7bj\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.022989 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-credential-keys\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.023007 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-combined-ca-bundle\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.023036 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-cert-memcached-mtls\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.023087 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-scripts\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.023103 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-config-data\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.023150 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-fernet-keys\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.125161 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-scripts\") pod 
\"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.125219 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-config-data\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.125289 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-fernet-keys\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.125394 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh7bj\" (UniqueName: \"kubernetes.io/projected/bfb209b6-ed72-4796-92f5-85372aeaf10c-kube-api-access-nh7bj\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.125425 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-credential-keys\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.125451 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-combined-ca-bundle\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.125516 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-cert-memcached-mtls\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.134503 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-cert-memcached-mtls\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.137410 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-fernet-keys\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.145570 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-credential-keys\") pod \"keystone-bootstrap-2jn4f\" (UID: 
\"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.148560 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-config-data\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.154663 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-combined-ca-bundle\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.163683 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-scripts\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.166823 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh7bj\" (UniqueName: \"kubernetes.io/projected/bfb209b6-ed72-4796-92f5-85372aeaf10c-kube-api-access-nh7bj\") pod \"keystone-bootstrap-2jn4f\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.182995 4818 generic.go:334] "Generic (PLEG): container finished" podID="ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" containerID="a3df436dd4e579476af80c23e04a26cd1b2a4d56e5694dffe2507520eb42dc29" exitCode=0 Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.183122 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55","Type":"ContainerDied","Data":"a3df436dd4e579476af80c23e04a26cd1b2a4d56e5694dffe2507520eb42dc29"} Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.188211 4818 generic.go:334] "Generic (PLEG): container finished" podID="30127e4a-714f-4302-ba54-90ee862d760a" containerID="76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3" exitCode=143 Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.188266 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"30127e4a-714f-4302-ba54-90ee862d760a","Type":"ContainerDied","Data":"76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3"} Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.229592 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.298649 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.450288 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-combined-ca-bundle\") pod \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.450336 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-memcached-tls-certs\") pod \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.450410 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lb5bk\" (UniqueName: \"kubernetes.io/projected/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kube-api-access-lb5bk\") pod \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.450496 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kolla-config\") pod \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.450590 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-config-data\") pod \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\" (UID: \"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55\") " Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.451802 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-config-data" (OuterVolumeSpecName: "config-data") pod "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" (UID: "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.454412 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" (UID: "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.455413 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kube-api-access-lb5bk" (OuterVolumeSpecName: "kube-api-access-lb5bk") pod "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" (UID: "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55"). InnerVolumeSpecName "kube-api-access-lb5bk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.503908 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" (UID: "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.535781 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"https://10.217.0.154:9322/\": dial tcp 10.217.0.154:9322: connect: connection refused" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.536123 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.154:9322/\": dial tcp 10.217.0.154:9322: connect: connection refused" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.539909 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" (UID: "ed2f758c-c27c-4554-bcb1-c8be8a0e2e55"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.552381 4818 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kolla-config\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.552696 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.552710 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.552724 4818 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.552735 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lb5bk\" (UniqueName: \"kubernetes.io/projected/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55-kube-api-access-lb5bk\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:41 crc kubenswrapper[4818]: E0930 17:21:41.623255 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:21:41 crc kubenswrapper[4818]: E0930 17:21:41.624879 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:21:41 crc kubenswrapper[4818]: E0930 17:21:41.630125 4818 log.go:32] "ExecSync cmd from runtime service failed" 
err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:21:41 crc kubenswrapper[4818]: E0930 17:21:41.630200 4818 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="4d794284-d473-467e-acdd-d14a44d28fb1" containerName="watcher-applier" Sep 30 17:21:41 crc kubenswrapper[4818]: I0930 17:21:41.847758 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-2jn4f"] Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.062306 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b40b79de-4540-4db4-9468-0c9786456c5a" path="/var/lib/kubelet/pods/b40b79de-4540-4db4-9468-0c9786456c5a/volumes" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.198834 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"ed2f758c-c27c-4554-bcb1-c8be8a0e2e55","Type":"ContainerDied","Data":"b55f6e239085ca48692772ed9621b73d67ded65fcbe033c2563097c1647f912c"} Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.198865 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.198891 4818 scope.go:117] "RemoveContainer" containerID="a3df436dd4e579476af80c23e04a26cd1b2a4d56e5694dffe2507520eb42dc29" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.199253 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.201377 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"30127e4a-714f-4302-ba54-90ee862d760a","Type":"ContainerDied","Data":"57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29"} Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.201368 4818 generic.go:334] "Generic (PLEG): container finished" podID="30127e4a-714f-4302-ba54-90ee862d760a" containerID="57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29" exitCode=0 Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.201457 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"30127e4a-714f-4302-ba54-90ee862d760a","Type":"ContainerDied","Data":"4ed9e1663354b73d79d80dcba4699fe414b1d73642b67bbfef4d3f05dd9f9d56"} Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.203232 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" event={"ID":"bfb209b6-ed72-4796-92f5-85372aeaf10c","Type":"ContainerStarted","Data":"5e037268e9dfb0df7da85f9f1244510eaf4496e202b376016151a9c2869714c5"} Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.225987 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.229711 4818 scope.go:117] "RemoveContainer" containerID="57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.252139 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.268000 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:21:42 crc kubenswrapper[4818]: E0930 17:21:42.268388 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-api" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.268402 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-api" Sep 30 17:21:42 crc kubenswrapper[4818]: E0930 17:21:42.268418 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-kuttl-api-log" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.268426 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-kuttl-api-log" Sep 30 17:21:42 crc kubenswrapper[4818]: E0930 17:21:42.268442 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" containerName="memcached" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.268449 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" containerName="memcached" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.268604 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-api" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.268619 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" containerName="memcached" Sep 30 17:21:42 crc kubenswrapper[4818]: 
I0930 17:21:42.268627 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="30127e4a-714f-4302-ba54-90ee862d760a" containerName="watcher-kuttl-api-log" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.269204 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.272641 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"memcached-config-data" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.272685 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-memcached-svc" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.272878 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"memcached-memcached-dockercfg-xj47j" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.288154 4818 scope.go:117] "RemoveContainer" containerID="76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.296373 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.312152 4818 scope.go:117] "RemoveContainer" containerID="57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29" Sep 30 17:21:42 crc kubenswrapper[4818]: E0930 17:21:42.318476 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29\": container with ID starting with 57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29 not found: ID does not exist" containerID="57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.318527 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29"} err="failed to get container status \"57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29\": rpc error: code = NotFound desc = could not find container \"57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29\": container with ID starting with 57fa46e123ef925b9c3cc8a35a87beba3cf3e732e114dc4cb013a1e211e63f29 not found: ID does not exist" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.318560 4818 scope.go:117] "RemoveContainer" containerID="76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3" Sep 30 17:21:42 crc kubenswrapper[4818]: E0930 17:21:42.318829 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3\": container with ID starting with 76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3 not found: ID does not exist" containerID="76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.318856 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3"} err="failed to get container status \"76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3\": rpc error: code = NotFound desc = could not find container 
\"76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3\": container with ID starting with 76b6083ce1264ede9c24938157b271456a7ad176aa190c0e8f9685f4107134d3 not found: ID does not exist" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.386719 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-internal-tls-certs\") pod \"30127e4a-714f-4302-ba54-90ee862d760a\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.386773 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-combined-ca-bundle\") pod \"30127e4a-714f-4302-ba54-90ee862d760a\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.386827 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-config-data\") pod \"30127e4a-714f-4302-ba54-90ee862d760a\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.387013 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvqkc\" (UniqueName: \"kubernetes.io/projected/30127e4a-714f-4302-ba54-90ee862d760a-kube-api-access-bvqkc\") pod \"30127e4a-714f-4302-ba54-90ee862d760a\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.387657 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-custom-prometheus-ca\") pod \"30127e4a-714f-4302-ba54-90ee862d760a\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.387702 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30127e4a-714f-4302-ba54-90ee862d760a-logs\") pod \"30127e4a-714f-4302-ba54-90ee862d760a\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.387743 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-public-tls-certs\") pod \"30127e4a-714f-4302-ba54-90ee862d760a\" (UID: \"30127e4a-714f-4302-ba54-90ee862d760a\") " Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.388087 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e411aa-6171-4d94-8791-05a653dee924-combined-ca-bundle\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.388128 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmzrk\" (UniqueName: \"kubernetes.io/projected/44e411aa-6171-4d94-8791-05a653dee924-kube-api-access-rmzrk\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.388780 4818 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/44e411aa-6171-4d94-8791-05a653dee924-kolla-config\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.388843 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44e411aa-6171-4d94-8791-05a653dee924-config-data\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.388944 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30127e4a-714f-4302-ba54-90ee862d760a-logs" (OuterVolumeSpecName: "logs") pod "30127e4a-714f-4302-ba54-90ee862d760a" (UID: "30127e4a-714f-4302-ba54-90ee862d760a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.388957 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/44e411aa-6171-4d94-8791-05a653dee924-memcached-tls-certs\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.389054 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30127e4a-714f-4302-ba54-90ee862d760a-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.394491 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30127e4a-714f-4302-ba54-90ee862d760a-kube-api-access-bvqkc" (OuterVolumeSpecName: "kube-api-access-bvqkc") pod "30127e4a-714f-4302-ba54-90ee862d760a" (UID: "30127e4a-714f-4302-ba54-90ee862d760a"). InnerVolumeSpecName "kube-api-access-bvqkc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.412674 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30127e4a-714f-4302-ba54-90ee862d760a" (UID: "30127e4a-714f-4302-ba54-90ee862d760a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.424178 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "30127e4a-714f-4302-ba54-90ee862d760a" (UID: "30127e4a-714f-4302-ba54-90ee862d760a"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.439847 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "30127e4a-714f-4302-ba54-90ee862d760a" (UID: "30127e4a-714f-4302-ba54-90ee862d760a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.447503 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "30127e4a-714f-4302-ba54-90ee862d760a" (UID: "30127e4a-714f-4302-ba54-90ee862d760a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.454604 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-config-data" (OuterVolumeSpecName: "config-data") pod "30127e4a-714f-4302-ba54-90ee862d760a" (UID: "30127e4a-714f-4302-ba54-90ee862d760a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490481 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/44e411aa-6171-4d94-8791-05a653dee924-kolla-config\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490555 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44e411aa-6171-4d94-8791-05a653dee924-config-data\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490601 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/44e411aa-6171-4d94-8791-05a653dee924-memcached-tls-certs\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490625 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e411aa-6171-4d94-8791-05a653dee924-combined-ca-bundle\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490648 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmzrk\" (UniqueName: \"kubernetes.io/projected/44e411aa-6171-4d94-8791-05a653dee924-kube-api-access-rmzrk\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490715 4818 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490730 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490745 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-config-data\") on 
node \"crc\" DevicePath \"\"" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490757 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvqkc\" (UniqueName: \"kubernetes.io/projected/30127e4a-714f-4302-ba54-90ee862d760a-kube-api-access-bvqkc\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490768 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.490779 4818 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30127e4a-714f-4302-ba54-90ee862d760a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.491244 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/44e411aa-6171-4d94-8791-05a653dee924-kolla-config\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.491499 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44e411aa-6171-4d94-8791-05a653dee924-config-data\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.497692 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/44e411aa-6171-4d94-8791-05a653dee924-memcached-tls-certs\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.498510 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e411aa-6171-4d94-8791-05a653dee924-combined-ca-bundle\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.506587 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmzrk\" (UniqueName: \"kubernetes.io/projected/44e411aa-6171-4d94-8791-05a653dee924-kube-api-access-rmzrk\") pod \"memcached-0\" (UID: \"44e411aa-6171-4d94-8791-05a653dee924\") " pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:42 crc kubenswrapper[4818]: I0930 17:21:42.593326 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.079664 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"] Sep 30 17:21:43 crc kubenswrapper[4818]: W0930 17:21:43.088613 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44e411aa_6171_4d94_8791_05a653dee924.slice/crio-456dedd6dde412d745b7b69d264024d2c16211b1c73adcc421396739d55a95fb WatchSource:0}: Error finding container 456dedd6dde412d745b7b69d264024d2c16211b1c73adcc421396739d55a95fb: Status 404 returned error can't find the container with id 456dedd6dde412d745b7b69d264024d2c16211b1c73adcc421396739d55a95fb Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.212188 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" event={"ID":"bfb209b6-ed72-4796-92f5-85372aeaf10c","Type":"ContainerStarted","Data":"bcfea6f115fd374626dc950fd64bd9690dec691f687347dd78a97dbb3a5503a6"} Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.215125 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"44e411aa-6171-4d94-8791-05a653dee924","Type":"ContainerStarted","Data":"456dedd6dde412d745b7b69d264024d2c16211b1c73adcc421396739d55a95fb"} Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.216209 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.239207 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" podStartSLOduration=3.239189876 podStartE2EDuration="3.239189876s" podCreationTimestamp="2025-09-30 17:21:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:43.227578552 +0000 UTC m=+1349.981850378" watchObservedRunningTime="2025-09-30 17:21:43.239189876 +0000 UTC m=+1349.993461692" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.263892 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.280648 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.288098 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.289652 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.292534 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.292706 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.292813 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.294835 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405381 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405420 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405447 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405515 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjvxf\" (UniqueName: \"kubernetes.io/projected/b0a46038-8b31-4e29-b298-46d22ca94d05-kube-api-access-zjvxf\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405556 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a46038-8b31-4e29-b298-46d22ca94d05-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405611 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405645 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: 
\"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.405669 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.507623 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.507909 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.507947 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.507990 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjvxf\" (UniqueName: \"kubernetes.io/projected/b0a46038-8b31-4e29-b298-46d22ca94d05-kube-api-access-zjvxf\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.508025 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a46038-8b31-4e29-b298-46d22ca94d05-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.508069 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.508098 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.508125 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.509541 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a46038-8b31-4e29-b298-46d22ca94d05-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.511340 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.519311 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.519328 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.519383 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.519485 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.519728 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.546599 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjvxf\" (UniqueName: \"kubernetes.io/projected/b0a46038-8b31-4e29-b298-46d22ca94d05-kube-api-access-zjvxf\") pod \"watcher-kuttl-api-0\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.609359 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.767367 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.812983 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d794284-d473-467e-acdd-d14a44d28fb1-logs\") pod \"4d794284-d473-467e-acdd-d14a44d28fb1\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.813028 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-combined-ca-bundle\") pod \"4d794284-d473-467e-acdd-d14a44d28fb1\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.813072 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-config-data\") pod \"4d794284-d473-467e-acdd-d14a44d28fb1\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.813094 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25c89\" (UniqueName: \"kubernetes.io/projected/4d794284-d473-467e-acdd-d14a44d28fb1-kube-api-access-25c89\") pod \"4d794284-d473-467e-acdd-d14a44d28fb1\" (UID: \"4d794284-d473-467e-acdd-d14a44d28fb1\") " Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.813660 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d794284-d473-467e-acdd-d14a44d28fb1-logs" (OuterVolumeSpecName: "logs") pod "4d794284-d473-467e-acdd-d14a44d28fb1" (UID: "4d794284-d473-467e-acdd-d14a44d28fb1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.821161 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d794284-d473-467e-acdd-d14a44d28fb1-kube-api-access-25c89" (OuterVolumeSpecName: "kube-api-access-25c89") pod "4d794284-d473-467e-acdd-d14a44d28fb1" (UID: "4d794284-d473-467e-acdd-d14a44d28fb1"). InnerVolumeSpecName "kube-api-access-25c89". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.842509 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d794284-d473-467e-acdd-d14a44d28fb1" (UID: "4d794284-d473-467e-acdd-d14a44d28fb1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.895032 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-config-data" (OuterVolumeSpecName: "config-data") pod "4d794284-d473-467e-acdd-d14a44d28fb1" (UID: "4d794284-d473-467e-acdd-d14a44d28fb1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.914736 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.914776 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25c89\" (UniqueName: \"kubernetes.io/projected/4d794284-d473-467e-acdd-d14a44d28fb1-kube-api-access-25c89\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.914786 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d794284-d473-467e-acdd-d14a44d28fb1-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:43 crc kubenswrapper[4818]: I0930 17:21:43.914799 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d794284-d473-467e-acdd-d14a44d28fb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.037185 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30127e4a-714f-4302-ba54-90ee862d760a" path="/var/lib/kubelet/pods/30127e4a-714f-4302-ba54-90ee862d760a/volumes" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.040381 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed2f758c-c27c-4554-bcb1-c8be8a0e2e55" path="/var/lib/kubelet/pods/ed2f758c-c27c-4554-bcb1-c8be8a0e2e55/volumes" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.090571 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:44 crc kubenswrapper[4818]: W0930 17:21:44.095223 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0a46038_8b31_4e29_b298_46d22ca94d05.slice/crio-e25248ac0dd94a75178ab04e70fa46cb07af5c3f37db5ad5d86b2eaccb73b454 WatchSource:0}: Error finding container e25248ac0dd94a75178ab04e70fa46cb07af5c3f37db5ad5d86b2eaccb73b454: Status 404 returned error can't find the container with id e25248ac0dd94a75178ab04e70fa46cb07af5c3f37db5ad5d86b2eaccb73b454 Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.225545 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"44e411aa-6171-4d94-8791-05a653dee924","Type":"ContainerStarted","Data":"722a49da9382e5e6fba698d65b596d1e353befebdf47653d8a5acaaf3bff16c4"} Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.225653 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.226959 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b0a46038-8b31-4e29-b298-46d22ca94d05","Type":"ContainerStarted","Data":"e25248ac0dd94a75178ab04e70fa46cb07af5c3f37db5ad5d86b2eaccb73b454"} Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.229091 4818 generic.go:334] "Generic (PLEG): container finished" podID="4d794284-d473-467e-acdd-d14a44d28fb1" containerID="c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" exitCode=0 Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.229173 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.229198 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4d794284-d473-467e-acdd-d14a44d28fb1","Type":"ContainerDied","Data":"c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791"} Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.229243 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4d794284-d473-467e-acdd-d14a44d28fb1","Type":"ContainerDied","Data":"fff43a38149a963968fce095ef91b0b80d0258d2205faeece4df8fd540c1eac8"} Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.229265 4818 scope.go:117] "RemoveContainer" containerID="c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.247520 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/memcached-0" podStartSLOduration=2.24749794 podStartE2EDuration="2.24749794s" podCreationTimestamp="2025-09-30 17:21:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:44.241424756 +0000 UTC m=+1350.995696592" watchObservedRunningTime="2025-09-30 17:21:44.24749794 +0000 UTC m=+1351.001769756" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.301955 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.306737 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.326524 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.326803 4818 scope.go:117] "RemoveContainer" containerID="c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" Sep 30 17:21:44 crc kubenswrapper[4818]: E0930 17:21:44.327470 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d794284-d473-467e-acdd-d14a44d28fb1" containerName="watcher-applier" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.327638 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d794284-d473-467e-acdd-d14a44d28fb1" containerName="watcher-applier" Sep 30 17:21:44 crc kubenswrapper[4818]: E0930 17:21:44.328178 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791\": container with ID starting with c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791 not found: ID does not exist" containerID="c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.328207 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791"} err="failed to get container status \"c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791\": rpc error: code = NotFound desc = could not find container \"c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791\": container with ID starting with 
c35a87f2c82bcd4734b1ad384899e130cb260f6d3020ddf89654444392945791 not found: ID does not exist" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.328457 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d794284-d473-467e-acdd-d14a44d28fb1" containerName="watcher-applier" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.330120 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.336272 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.337650 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.426547 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.426595 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.426637 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.426765 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a00533a7-cfa1-4094-9d55-8686d2f25d0f-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.427040 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8njzh\" (UniqueName: \"kubernetes.io/projected/a00533a7-cfa1-4094-9d55-8686d2f25d0f-kube-api-access-8njzh\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.528370 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.528412 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a00533a7-cfa1-4094-9d55-8686d2f25d0f-logs\") pod 
\"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.528493 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8njzh\" (UniqueName: \"kubernetes.io/projected/a00533a7-cfa1-4094-9d55-8686d2f25d0f-kube-api-access-8njzh\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.528547 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.528566 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.560067 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.560199 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.562822 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a00533a7-cfa1-4094-9d55-8686d2f25d0f-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.614720 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.615222 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8njzh\" (UniqueName: \"kubernetes.io/projected/a00533a7-cfa1-4094-9d55-8686d2f25d0f-kube-api-access-8njzh\") pod \"watcher-kuttl-applier-0\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.651036 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.720337 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.731252 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-combined-ca-bundle\") pod \"42776a44-2347-4224-8f79-82081d8962b7\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.731299 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-custom-prometheus-ca\") pod \"42776a44-2347-4224-8f79-82081d8962b7\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.731461 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-config-data\") pod \"42776a44-2347-4224-8f79-82081d8962b7\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.731496 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsnnt\" (UniqueName: \"kubernetes.io/projected/42776a44-2347-4224-8f79-82081d8962b7-kube-api-access-fsnnt\") pod \"42776a44-2347-4224-8f79-82081d8962b7\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.731574 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42776a44-2347-4224-8f79-82081d8962b7-logs\") pod \"42776a44-2347-4224-8f79-82081d8962b7\" (UID: \"42776a44-2347-4224-8f79-82081d8962b7\") " Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.732290 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42776a44-2347-4224-8f79-82081d8962b7-logs" (OuterVolumeSpecName: "logs") pod "42776a44-2347-4224-8f79-82081d8962b7" (UID: "42776a44-2347-4224-8f79-82081d8962b7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.737217 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42776a44-2347-4224-8f79-82081d8962b7-kube-api-access-fsnnt" (OuterVolumeSpecName: "kube-api-access-fsnnt") pod "42776a44-2347-4224-8f79-82081d8962b7" (UID: "42776a44-2347-4224-8f79-82081d8962b7"). InnerVolumeSpecName "kube-api-access-fsnnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.832123 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "42776a44-2347-4224-8f79-82081d8962b7" (UID: "42776a44-2347-4224-8f79-82081d8962b7"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.834544 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsnnt\" (UniqueName: \"kubernetes.io/projected/42776a44-2347-4224-8f79-82081d8962b7-kube-api-access-fsnnt\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.834570 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42776a44-2347-4224-8f79-82081d8962b7-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.834581 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.837646 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42776a44-2347-4224-8f79-82081d8962b7" (UID: "42776a44-2347-4224-8f79-82081d8962b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.846940 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-config-data" (OuterVolumeSpecName: "config-data") pod "42776a44-2347-4224-8f79-82081d8962b7" (UID: "42776a44-2347-4224-8f79-82081d8962b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.935410 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:44 crc kubenswrapper[4818]: I0930 17:21:44.935445 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42776a44-2347-4224-8f79-82081d8962b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.236937 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b0a46038-8b31-4e29-b298-46d22ca94d05","Type":"ContainerStarted","Data":"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0"} Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.237219 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b0a46038-8b31-4e29-b298-46d22ca94d05","Type":"ContainerStarted","Data":"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"} Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.242344 4818 generic.go:334] "Generic (PLEG): container finished" podID="42776a44-2347-4224-8f79-82081d8962b7" containerID="14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d" exitCode=0 Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.243013 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.244181 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"42776a44-2347-4224-8f79-82081d8962b7","Type":"ContainerDied","Data":"14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d"} Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.244249 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"42776a44-2347-4224-8f79-82081d8962b7","Type":"ContainerDied","Data":"2d807319d6e1a5b66120cd9f815c4760e6d5eeabac912ca1278af577b37e9699"} Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.244280 4818 scope.go:117] "RemoveContainer" containerID="14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.262141 4818 scope.go:117] "RemoveContainer" containerID="14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.262771 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:21:45 crc kubenswrapper[4818]: E0930 17:21:45.262787 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d\": container with ID starting with 14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d not found: ID does not exist" containerID="14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.262855 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d"} err="failed to get container status \"14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d\": rpc error: code = NotFound desc = could not find container \"14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d\": container with ID starting with 14bc5352f9568e678899cdff0b8914f538dc95e0140233f75e24d64861c0ef2d not found: ID does not exist" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.266787 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.266770461 podStartE2EDuration="2.266770461s" podCreationTimestamp="2025-09-30 17:21:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:45.256761731 +0000 UTC m=+1352.011033547" watchObservedRunningTime="2025-09-30 17:21:45.266770461 +0000 UTC m=+1352.021042267" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.290058 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.301358 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.309242 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:45 crc kubenswrapper[4818]: E0930 17:21:45.309830 4818 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="42776a44-2347-4224-8f79-82081d8962b7" containerName="watcher-decision-engine" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.310133 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="42776a44-2347-4224-8f79-82081d8962b7" containerName="watcher-decision-engine" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.310408 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="42776a44-2347-4224-8f79-82081d8962b7" containerName="watcher-decision-engine" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.311051 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.316311 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.351761 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.445934 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.446002 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9dlm\" (UniqueName: \"kubernetes.io/projected/47396e51-183d-4195-8a1e-2e10d824756a-kube-api-access-n9dlm\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.446051 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.446085 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.446137 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396e51-183d-4195-8a1e-2e10d824756a-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.446195 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: 
\"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.547148 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.547202 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.547246 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396e51-183d-4195-8a1e-2e10d824756a-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.547290 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.547338 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.547364 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9dlm\" (UniqueName: \"kubernetes.io/projected/47396e51-183d-4195-8a1e-2e10d824756a-kube-api-access-n9dlm\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.548049 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396e51-183d-4195-8a1e-2e10d824756a-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.556215 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.556282 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.556840 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.557163 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.567169 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9dlm\" (UniqueName: \"kubernetes.io/projected/47396e51-183d-4195-8a1e-2e10d824756a-kube-api-access-n9dlm\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:45 crc kubenswrapper[4818]: I0930 17:21:45.756225 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.042987 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42776a44-2347-4224-8f79-82081d8962b7" path="/var/lib/kubelet/pods/42776a44-2347-4224-8f79-82081d8962b7/volumes" Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.044137 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d794284-d473-467e-acdd-d14a44d28fb1" path="/var/lib/kubelet/pods/4d794284-d473-467e-acdd-d14a44d28fb1/volumes" Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.253018 4818 generic.go:334] "Generic (PLEG): container finished" podID="bfb209b6-ed72-4796-92f5-85372aeaf10c" containerID="bcfea6f115fd374626dc950fd64bd9690dec691f687347dd78a97dbb3a5503a6" exitCode=0 Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.253102 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" event={"ID":"bfb209b6-ed72-4796-92f5-85372aeaf10c","Type":"ContainerDied","Data":"bcfea6f115fd374626dc950fd64bd9690dec691f687347dd78a97dbb3a5503a6"} Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.257571 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a00533a7-cfa1-4094-9d55-8686d2f25d0f","Type":"ContainerStarted","Data":"40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e"} Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.257614 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a00533a7-cfa1-4094-9d55-8686d2f25d0f","Type":"ContainerStarted","Data":"9893e111d85bd67e3e1396093a4ad02b06273e902e92939e7b88c912c9a12a59"} Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.258289 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 
17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.294494 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:21:46 crc kubenswrapper[4818]: I0930 17:21:46.325694 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.325680644 podStartE2EDuration="2.325680644s" podCreationTimestamp="2025-09-30 17:21:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:46.287815231 +0000 UTC m=+1353.042087057" watchObservedRunningTime="2025-09-30 17:21:46.325680644 +0000 UTC m=+1353.079952460" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.265176 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"47396e51-183d-4195-8a1e-2e10d824756a","Type":"ContainerStarted","Data":"847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5"} Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.265502 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"47396e51-183d-4195-8a1e-2e10d824756a","Type":"ContainerStarted","Data":"3a440443ea61b3e5023564306a0a9b20e917c78bcc77137e676bbfd701f35d27"} Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.287725 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.287705789 podStartE2EDuration="2.287705789s" podCreationTimestamp="2025-09-30 17:21:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:47.279608 +0000 UTC m=+1354.033879816" watchObservedRunningTime="2025-09-30 17:21:47.287705789 +0000 UTC m=+1354.041977605" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.689344 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.720941 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-cert-memcached-mtls\") pod \"bfb209b6-ed72-4796-92f5-85372aeaf10c\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.720998 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nh7bj\" (UniqueName: \"kubernetes.io/projected/bfb209b6-ed72-4796-92f5-85372aeaf10c-kube-api-access-nh7bj\") pod \"bfb209b6-ed72-4796-92f5-85372aeaf10c\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.721027 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-scripts\") pod \"bfb209b6-ed72-4796-92f5-85372aeaf10c\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.721143 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-credential-keys\") pod \"bfb209b6-ed72-4796-92f5-85372aeaf10c\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.721167 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-combined-ca-bundle\") pod \"bfb209b6-ed72-4796-92f5-85372aeaf10c\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.721246 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-fernet-keys\") pod \"bfb209b6-ed72-4796-92f5-85372aeaf10c\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.721335 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-config-data\") pod \"bfb209b6-ed72-4796-92f5-85372aeaf10c\" (UID: \"bfb209b6-ed72-4796-92f5-85372aeaf10c\") " Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.735071 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-scripts" (OuterVolumeSpecName: "scripts") pod "bfb209b6-ed72-4796-92f5-85372aeaf10c" (UID: "bfb209b6-ed72-4796-92f5-85372aeaf10c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.746537 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "bfb209b6-ed72-4796-92f5-85372aeaf10c" (UID: "bfb209b6-ed72-4796-92f5-85372aeaf10c"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.746909 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfb209b6-ed72-4796-92f5-85372aeaf10c-kube-api-access-nh7bj" (OuterVolumeSpecName: "kube-api-access-nh7bj") pod "bfb209b6-ed72-4796-92f5-85372aeaf10c" (UID: "bfb209b6-ed72-4796-92f5-85372aeaf10c"). InnerVolumeSpecName "kube-api-access-nh7bj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.748041 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "bfb209b6-ed72-4796-92f5-85372aeaf10c" (UID: "bfb209b6-ed72-4796-92f5-85372aeaf10c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.748893 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-config-data" (OuterVolumeSpecName: "config-data") pod "bfb209b6-ed72-4796-92f5-85372aeaf10c" (UID: "bfb209b6-ed72-4796-92f5-85372aeaf10c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.770832 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bfb209b6-ed72-4796-92f5-85372aeaf10c" (UID: "bfb209b6-ed72-4796-92f5-85372aeaf10c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.823042 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.823083 4818 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-credential-keys\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.823092 4818 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-fernet-keys\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.823100 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.823108 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.823124 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nh7bj\" (UniqueName: \"kubernetes.io/projected/bfb209b6-ed72-4796-92f5-85372aeaf10c-kube-api-access-nh7bj\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.826056 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "bfb209b6-ed72-4796-92f5-85372aeaf10c" (UID: "bfb209b6-ed72-4796-92f5-85372aeaf10c"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:47 crc kubenswrapper[4818]: I0930 17:21:47.924475 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bfb209b6-ed72-4796-92f5-85372aeaf10c-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:48 crc kubenswrapper[4818]: I0930 17:21:48.273167 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" event={"ID":"bfb209b6-ed72-4796-92f5-85372aeaf10c","Type":"ContainerDied","Data":"5e037268e9dfb0df7da85f9f1244510eaf4496e202b376016151a9c2869714c5"} Sep 30 17:21:48 crc kubenswrapper[4818]: I0930 17:21:48.273211 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e037268e9dfb0df7da85f9f1244510eaf4496e202b376016151a9c2869714c5" Sep 30 17:21:48 crc kubenswrapper[4818]: I0930 17:21:48.273216 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-2jn4f" Sep 30 17:21:48 crc kubenswrapper[4818]: I0930 17:21:48.610612 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:48 crc kubenswrapper[4818]: I0930 17:21:48.611223 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 17:21:48 crc kubenswrapper[4818]: I0930 17:21:48.706555 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:49 crc kubenswrapper[4818]: I0930 17:21:49.652818 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.595126 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/memcached-0" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.797977 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n"] Sep 30 17:21:52 crc kubenswrapper[4818]: E0930 17:21:52.798274 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfb209b6-ed72-4796-92f5-85372aeaf10c" containerName="keystone-bootstrap" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.798288 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfb209b6-ed72-4796-92f5-85372aeaf10c" containerName="keystone-bootstrap" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.798467 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfb209b6-ed72-4796-92f5-85372aeaf10c" containerName="keystone-bootstrap" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.799035 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.815764 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n"] Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.915788 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-cert-memcached-mtls\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.915848 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-combined-ca-bundle\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.915877 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-credential-keys\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.915895 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-internal-tls-certs\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.915912 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-config-data\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.915948 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-public-tls-certs\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.915991 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7mdc\" (UniqueName: \"kubernetes.io/projected/32034362-4dd5-4231-b991-837462326e1a-kube-api-access-r7mdc\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.916016 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-scripts\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " 
pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:52 crc kubenswrapper[4818]: I0930 17:21:52.916034 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-fernet-keys\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.016865 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-fernet-keys\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.016978 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-cert-memcached-mtls\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.017012 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-combined-ca-bundle\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.017035 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-credential-keys\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.017050 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-internal-tls-certs\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.017069 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-config-data\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.017087 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-public-tls-certs\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.017130 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7mdc\" (UniqueName: \"kubernetes.io/projected/32034362-4dd5-4231-b991-837462326e1a-kube-api-access-r7mdc\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: 
\"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.017149 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-scripts\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.022534 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-credential-keys\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.022887 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-scripts\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.022899 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-combined-ca-bundle\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.023437 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-public-tls-certs\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.025360 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-cert-memcached-mtls\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.025475 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-config-data\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.026389 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-internal-tls-certs\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.040868 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7mdc\" (UniqueName: \"kubernetes.io/projected/32034362-4dd5-4231-b991-837462326e1a-kube-api-access-r7mdc\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " 
pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.043064 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32034362-4dd5-4231-b991-837462326e1a-fernet-keys\") pod \"keystone-55f7bfb7c9-pcc8n\" (UID: \"32034362-4dd5-4231-b991-837462326e1a\") " pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.117665 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:53 crc kubenswrapper[4818]: W0930 17:21:53.579700 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32034362_4dd5_4231_b991_837462326e1a.slice/crio-2467e84ec4c48868c2c1d50927487e23bf3df7ec4c6ebdc9c748146f4e8e4647 WatchSource:0}: Error finding container 2467e84ec4c48868c2c1d50927487e23bf3df7ec4c6ebdc9c748146f4e8e4647: Status 404 returned error can't find the container with id 2467e84ec4c48868c2c1d50927487e23bf3df7ec4c6ebdc9c748146f4e8e4647 Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.588586 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n"] Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.611406 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:53 crc kubenswrapper[4818]: I0930 17:21:53.637194 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:54 crc kubenswrapper[4818]: I0930 17:21:54.326560 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" event={"ID":"32034362-4dd5-4231-b991-837462326e1a","Type":"ContainerStarted","Data":"766bfb36e6c6a3f9cf12f2e926d6205522c29276dcf11686b7ea5c891d895068"} Sep 30 17:21:54 crc kubenswrapper[4818]: I0930 17:21:54.326957 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" event={"ID":"32034362-4dd5-4231-b991-837462326e1a","Type":"ContainerStarted","Data":"2467e84ec4c48868c2c1d50927487e23bf3df7ec4c6ebdc9c748146f4e8e4647"} Sep 30 17:21:54 crc kubenswrapper[4818]: I0930 17:21:54.353106 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:54 crc kubenswrapper[4818]: I0930 17:21:54.356515 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" podStartSLOduration=2.35649038 podStartE2EDuration="2.35649038s" podCreationTimestamp="2025-09-30 17:21:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:54.353803788 +0000 UTC m=+1361.108075654" watchObservedRunningTime="2025-09-30 17:21:54.35649038 +0000 UTC m=+1361.110762196" Sep 30 17:21:54 crc kubenswrapper[4818]: I0930 17:21:54.472664 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:54 crc kubenswrapper[4818]: I0930 17:21:54.652296 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:54 crc kubenswrapper[4818]: I0930 17:21:54.681316 4818 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:55 crc kubenswrapper[4818]: I0930 17:21:55.333432 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n" Sep 30 17:21:55 crc kubenswrapper[4818]: I0930 17:21:55.357342 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:21:55 crc kubenswrapper[4818]: I0930 17:21:55.756602 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:55 crc kubenswrapper[4818]: I0930 17:21:55.792537 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:56 crc kubenswrapper[4818]: I0930 17:21:56.346397 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-kuttl-api-log" containerID="cri-o://9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c" gracePeriod=30 Sep 30 17:21:56 crc kubenswrapper[4818]: I0930 17:21:56.346881 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-api" containerID="cri-o://2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0" gracePeriod=30 Sep 30 17:21:56 crc kubenswrapper[4818]: I0930 17:21:56.347135 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:56 crc kubenswrapper[4818]: I0930 17:21:56.379611 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.292763 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.358597 4818 generic.go:334] "Generic (PLEG): container finished" podID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerID="2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0" exitCode=0 Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.358626 4818 generic.go:334] "Generic (PLEG): container finished" podID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerID="9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c" exitCode=143 Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.359469 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.359840 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b0a46038-8b31-4e29-b298-46d22ca94d05","Type":"ContainerDied","Data":"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0"}
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.359868 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b0a46038-8b31-4e29-b298-46d22ca94d05","Type":"ContainerDied","Data":"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"}
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.359881 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b0a46038-8b31-4e29-b298-46d22ca94d05","Type":"ContainerDied","Data":"e25248ac0dd94a75178ab04e70fa46cb07af5c3f37db5ad5d86b2eaccb73b454"}
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.359896 4818 scope.go:117] "RemoveContainer" containerID="2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.386678 4818 scope.go:117] "RemoveContainer" containerID="9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392120 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjvxf\" (UniqueName: \"kubernetes.io/projected/b0a46038-8b31-4e29-b298-46d22ca94d05-kube-api-access-zjvxf\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392172 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-custom-prometheus-ca\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392210 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-config-data\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392269 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-internal-tls-certs\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392295 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-cert-memcached-mtls\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392362 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-combined-ca-bundle\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392401 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a46038-8b31-4e29-b298-46d22ca94d05-logs\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.392454 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-public-tls-certs\") pod \"b0a46038-8b31-4e29-b298-46d22ca94d05\" (UID: \"b0a46038-8b31-4e29-b298-46d22ca94d05\") "
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.393435 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0a46038-8b31-4e29-b298-46d22ca94d05-logs" (OuterVolumeSpecName: "logs") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.398401 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0a46038-8b31-4e29-b298-46d22ca94d05-kube-api-access-zjvxf" (OuterVolumeSpecName: "kube-api-access-zjvxf") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "kube-api-access-zjvxf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.434100 4818 scope.go:117] "RemoveContainer" containerID="2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0"
Sep 30 17:21:57 crc kubenswrapper[4818]: E0930 17:21:57.434592 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0\": container with ID starting with 2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0 not found: ID does not exist" containerID="2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.434637 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0"} err="failed to get container status \"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0\": rpc error: code = NotFound desc = could not find container \"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0\": container with ID starting with 2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0 not found: ID does not exist"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.434669 4818 scope.go:117] "RemoveContainer" containerID="9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"
Sep 30 17:21:57 crc kubenswrapper[4818]: E0930 17:21:57.434999 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c\": container with ID starting with 9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c not found: ID does not exist" containerID="9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.435038 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"} err="failed to get container status \"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c\": rpc error: code = NotFound desc = could not find container \"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c\": container with ID starting with 9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c not found: ID does not exist"
containerID={"Type":"cri-o","ID":"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"} err="failed to get container status \"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c\": rpc error: code = NotFound desc = could not find container \"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c\": container with ID starting with 9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c not found: ID does not exist" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.435061 4818 scope.go:117] "RemoveContainer" containerID="2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.435276 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0"} err="failed to get container status \"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0\": rpc error: code = NotFound desc = could not find container \"2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0\": container with ID starting with 2d1fbd7ab2b2714bc6eae12dadec3d780610cea92d0ca05ff5234671fac205c0 not found: ID does not exist" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.435292 4818 scope.go:117] "RemoveContainer" containerID="9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.435467 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c"} err="failed to get container status \"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c\": rpc error: code = NotFound desc = could not find container \"9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c\": container with ID starting with 9d1225f09882aadef8f15284c3cc353c7a9860ca95a9e7694ddc3ff04ca0320c not found: ID does not exist" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.442131 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-config-data" (OuterVolumeSpecName: "config-data") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.453114 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.453246 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.456178 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.463163 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.465151 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b0a46038-8b31-4e29-b298-46d22ca94d05" (UID: "b0a46038-8b31-4e29-b298-46d22ca94d05"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494170 4818 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494198 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494206 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494215 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a46038-8b31-4e29-b298-46d22ca94d05-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494223 4818 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494231 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjvxf\" (UniqueName: \"kubernetes.io/projected/b0a46038-8b31-4e29-b298-46d22ca94d05-kube-api-access-zjvxf\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494240 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.494250 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a46038-8b31-4e29-b298-46d22ca94d05-config-data\") on node \"crc\" DevicePath \"\"" Sep 
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.693461 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.701028 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.715540 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:21:57 crc kubenswrapper[4818]: E0930 17:21:57.715878 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-kuttl-api-log"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.715900 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-kuttl-api-log"
Sep 30 17:21:57 crc kubenswrapper[4818]: E0930 17:21:57.715915 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-api"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.716171 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-api"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.716391 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-kuttl-api-log"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.716430 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" containerName="watcher-api"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.717671 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.723043 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.733974 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.798309 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.798383 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.798432 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.798472 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd3c318e-092d-46e1-b097-932969b4ae2b-logs\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.798570 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.798615 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwgvf\" (UniqueName: \"kubernetes.io/projected/fd3c318e-092d-46e1-b097-932969b4ae2b-kube-api-access-mwgvf\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.900478 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.900562 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwgvf\" (UniqueName: \"kubernetes.io/projected/fd3c318e-092d-46e1-b097-932969b4ae2b-kube-api-access-mwgvf\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
\"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.900606 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.900623 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.900661 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.900691 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd3c318e-092d-46e1-b097-932969b4ae2b-logs\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.901357 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd3c318e-092d-46e1-b097-932969b4ae2b-logs\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.906172 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.906492 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.906629 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.921390 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:57 crc kubenswrapper[4818]: I0930 17:21:57.927583 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwgvf\" (UniqueName: \"kubernetes.io/projected/fd3c318e-092d-46e1-b097-932969b4ae2b-kube-api-access-mwgvf\") pod \"watcher-kuttl-api-0\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:58 crc kubenswrapper[4818]: I0930 17:21:58.033107 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0a46038-8b31-4e29-b298-46d22ca94d05" path="/var/lib/kubelet/pods/b0a46038-8b31-4e29-b298-46d22ca94d05/volumes" Sep 30 17:21:58 crc kubenswrapper[4818]: I0930 17:21:58.036056 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:58 crc kubenswrapper[4818]: I0930 17:21:58.537870 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:21:58 crc kubenswrapper[4818]: W0930 17:21:58.538809 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd3c318e_092d_46e1_b097_932969b4ae2b.slice/crio-4b5df590e68a9d3460cf50c911fd9a87939b761a1ee255a938ef200659025273 WatchSource:0}: Error finding container 4b5df590e68a9d3460cf50c911fd9a87939b761a1ee255a938ef200659025273: Status 404 returned error can't find the container with id 4b5df590e68a9d3460cf50c911fd9a87939b761a1ee255a938ef200659025273 Sep 30 17:21:59 crc kubenswrapper[4818]: I0930 17:21:59.378610 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"fd3c318e-092d-46e1-b097-932969b4ae2b","Type":"ContainerStarted","Data":"36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6"} Sep 30 17:21:59 crc kubenswrapper[4818]: I0930 17:21:59.378673 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"fd3c318e-092d-46e1-b097-932969b4ae2b","Type":"ContainerStarted","Data":"c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e"} Sep 30 17:21:59 crc kubenswrapper[4818]: I0930 17:21:59.378696 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"fd3c318e-092d-46e1-b097-932969b4ae2b","Type":"ContainerStarted","Data":"4b5df590e68a9d3460cf50c911fd9a87939b761a1ee255a938ef200659025273"} Sep 30 17:21:59 crc kubenswrapper[4818]: I0930 17:21:59.379101 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:21:59 crc kubenswrapper[4818]: I0930 17:21:59.408193 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.4081461969999998 podStartE2EDuration="2.408146197s" podCreationTimestamp="2025-09-30 17:21:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:21:59.401315203 +0000 UTC m=+1366.155587029" watchObservedRunningTime="2025-09-30 17:21:59.408146197 +0000 UTC m=+1366.162418023" Sep 30 17:22:01 crc kubenswrapper[4818]: I0930 17:22:01.687624 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:22:02 crc kubenswrapper[4818]: I0930 17:22:02.790016 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 
Sep 30 17:22:03 crc kubenswrapper[4818]: I0930 17:22:03.036592 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:22:08 crc kubenswrapper[4818]: I0930 17:22:08.036736 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:22:08 crc kubenswrapper[4818]: I0930 17:22:08.086762 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:22:08 crc kubenswrapper[4818]: I0930 17:22:08.462886 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:22:24 crc kubenswrapper[4818]: I0930 17:22:24.659448 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/keystone-55f7bfb7c9-pcc8n"
Sep 30 17:22:24 crc kubenswrapper[4818]: I0930 17:22:24.722098 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"]
Sep 30 17:22:24 crc kubenswrapper[4818]: I0930 17:22:24.722545 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" podUID="4c434f52-318f-4151-a9bd-11eb9be54b5a" containerName="keystone-api" containerID="cri-o://4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7" gracePeriod=30
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.318293 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460327 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-fernet-keys\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460405 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-scripts\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460434 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-internal-tls-certs\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460485 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-config-data\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460506 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-combined-ca-bundle\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460534 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-credential-keys\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460606 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sckl5\" (UniqueName: \"kubernetes.io/projected/4c434f52-318f-4151-a9bd-11eb9be54b5a-kube-api-access-sckl5\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.460663 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-public-tls-certs\") pod \"4c434f52-318f-4151-a9bd-11eb9be54b5a\" (UID: \"4c434f52-318f-4151-a9bd-11eb9be54b5a\") "
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.466760 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.466837 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c434f52-318f-4151-a9bd-11eb9be54b5a-kube-api-access-sckl5" (OuterVolumeSpecName: "kube-api-access-sckl5") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "kube-api-access-sckl5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.467058 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-scripts" (OuterVolumeSpecName: "scripts") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.470573 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.536460 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-config-data" (OuterVolumeSpecName: "config-data") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.537136 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.543869 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.562905 4818 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-fernet-keys\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.562955 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.562964 4818 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.562973 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.562982 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.562990 4818 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-credential-keys\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.563001 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sckl5\" (UniqueName: \"kubernetes.io/projected/4c434f52-318f-4151-a9bd-11eb9be54b5a-kube-api-access-sckl5\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.612235 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4c434f52-318f-4151-a9bd-11eb9be54b5a" (UID: "4c434f52-318f-4151-a9bd-11eb9be54b5a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.666213 4818 generic.go:334] "Generic (PLEG): container finished" podID="4c434f52-318f-4151-a9bd-11eb9be54b5a" containerID="4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7" exitCode=0 Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.666260 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" event={"ID":"4c434f52-318f-4151-a9bd-11eb9be54b5a","Type":"ContainerDied","Data":"4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7"} Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.666289 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" event={"ID":"4c434f52-318f-4151-a9bd-11eb9be54b5a","Type":"ContainerDied","Data":"1f180be6f3e98683bc15ca96bcd125b7353c5fc156c71ec8ce3ba105d954a61f"} Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.666308 4818 scope.go:117] "RemoveContainer" containerID="4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.666452 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.667563 4818 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c434f52-318f-4151-a9bd-11eb9be54b5a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.724810 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"] Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.728090 4818 scope.go:117] "RemoveContainer" containerID="4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7" Sep 30 17:22:28 crc kubenswrapper[4818]: E0930 17:22:28.730898 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7\": container with ID starting with 4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7 not found: ID does not exist" containerID="4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.731362 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7"} err="failed to get container status \"4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7\": rpc error: code = NotFound desc = could not find container \"4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7\": container with ID starting with 4ee388ff07baacc12813196a88e4e25c4931723f8f084907455ed727593c9dd7 not found: ID does not exist" Sep 30 17:22:28 crc kubenswrapper[4818]: I0930 17:22:28.747930 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-54f6b9b5cb-zzgw8"] Sep 30 17:22:30 crc kubenswrapper[4818]: I0930 17:22:30.032259 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c434f52-318f-4151-a9bd-11eb9be54b5a" path="/var/lib/kubelet/pods/4c434f52-318f-4151-a9bd-11eb9be54b5a/volumes" Sep 30 17:22:31 crc kubenswrapper[4818]: I0930 17:22:31.743026 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:22:31 crc kubenswrapper[4818]: I0930 17:22:31.743872 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-central-agent" containerID="cri-o://f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f" gracePeriod=30 Sep 30 17:22:31 crc kubenswrapper[4818]: I0930 17:22:31.743950 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="proxy-httpd" containerID="cri-o://6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56" gracePeriod=30 Sep 30 17:22:31 crc kubenswrapper[4818]: I0930 17:22:31.743971 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-notification-agent" containerID="cri-o://c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf" gracePeriod=30 Sep 30 17:22:31 crc kubenswrapper[4818]: I0930 17:22:31.743987 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="sg-core" containerID="cri-o://f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6" gracePeriod=30 Sep 30 17:22:32 crc kubenswrapper[4818]: I0930 17:22:32.697771 4818 generic.go:334] "Generic (PLEG): container finished" podID="e006d103-d4af-48b0-8189-66893c558088" containerID="6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56" exitCode=0 Sep 30 17:22:32 crc kubenswrapper[4818]: I0930 17:22:32.698087 4818 generic.go:334] "Generic (PLEG): container finished" podID="e006d103-d4af-48b0-8189-66893c558088" containerID="f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6" exitCode=2 Sep 30 17:22:32 crc kubenswrapper[4818]: I0930 17:22:32.698102 4818 generic.go:334] "Generic (PLEG): container finished" podID="e006d103-d4af-48b0-8189-66893c558088" containerID="f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f" exitCode=0 Sep 30 17:22:32 crc kubenswrapper[4818]: I0930 17:22:32.697849 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerDied","Data":"6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56"} Sep 30 17:22:32 crc kubenswrapper[4818]: I0930 17:22:32.698143 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerDied","Data":"f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6"} Sep 30 17:22:32 crc kubenswrapper[4818]: I0930 17:22:32.698161 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerDied","Data":"f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f"} Sep 30 17:22:32 crc kubenswrapper[4818]: I0930 17:22:32.782884 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.157:3000/\": dial tcp 10.217.0.157:3000: connect: connection refused" Sep 30 
17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.181768 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.304901 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-config-data\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305025 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-combined-ca-bundle\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305056 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-sg-core-conf-yaml\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305096 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5djql\" (UniqueName: \"kubernetes.io/projected/e006d103-d4af-48b0-8189-66893c558088-kube-api-access-5djql\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305156 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-run-httpd\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305242 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-scripts\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305289 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-ceilometer-tls-certs\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305338 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-log-httpd\") pod \"e006d103-d4af-48b0-8189-66893c558088\" (UID: \"e006d103-d4af-48b0-8189-66893c558088\") " Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305636 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305763 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305977 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.305997 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e006d103-d4af-48b0-8189-66893c558088-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.318205 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e006d103-d4af-48b0-8189-66893c558088-kube-api-access-5djql" (OuterVolumeSpecName: "kube-api-access-5djql") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "kube-api-access-5djql". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.345197 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-scripts" (OuterVolumeSpecName: "scripts") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.414869 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5djql\" (UniqueName: \"kubernetes.io/projected/e006d103-d4af-48b0-8189-66893c558088-kube-api-access-5djql\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.414900 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.438587 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.442398 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.463410 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.501666 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-config-data" (OuterVolumeSpecName: "config-data") pod "e006d103-d4af-48b0-8189-66893c558088" (UID: "e006d103-d4af-48b0-8189-66893c558088"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.515863 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.515892 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.515902 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.515911 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e006d103-d4af-48b0-8189-66893c558088-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.744657 4818 generic.go:334] "Generic (PLEG): container finished" podID="e006d103-d4af-48b0-8189-66893c558088" containerID="c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf" exitCode=0 Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.744710 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerDied","Data":"c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf"} Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.744745 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"e006d103-d4af-48b0-8189-66893c558088","Type":"ContainerDied","Data":"8d32c88c05a6faab0786c8079fee9c840a77fa19bc3b7fda6773a4172077e4bc"} Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.744765 4818 scope.go:117] "RemoveContainer" containerID="6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.744764 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.769725 4818 scope.go:117] "RemoveContainer" containerID="f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.776443 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.783459 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.790911 4818 scope.go:117] "RemoveContainer" containerID="c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.812828 4818 scope.go:117] "RemoveContainer" containerID="f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.815641 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.816028 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="proxy-httpd" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816043 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="proxy-httpd" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.816057 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-central-agent" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816065 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-central-agent" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.816078 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="sg-core" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816084 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="sg-core" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.816099 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-notification-agent" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816104 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-notification-agent" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.816125 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c434f52-318f-4151-a9bd-11eb9be54b5a" containerName="keystone-api" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816131 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c434f52-318f-4151-a9bd-11eb9be54b5a" containerName="keystone-api" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816293 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c434f52-318f-4151-a9bd-11eb9be54b5a" containerName="keystone-api" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816305 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="proxy-httpd" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816321 4818 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-central-agent" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816331 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="sg-core" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.816338 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="e006d103-d4af-48b0-8189-66893c558088" containerName="ceilometer-notification-agent" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.817865 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.819905 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.823945 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.820823 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.820889 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.852888 4818 scope.go:117] "RemoveContainer" containerID="6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.853263 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56\": container with ID starting with 6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56 not found: ID does not exist" containerID="6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.853312 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56"} err="failed to get container status \"6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56\": rpc error: code = NotFound desc = could not find container \"6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56\": container with ID starting with 6a78201979aa72466fabeaa2fa959aae2d6761bf9606053c086b76a3f7eada56 not found: ID does not exist" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.853343 4818 scope.go:117] "RemoveContainer" containerID="f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.853603 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6\": container with ID starting with f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6 not found: ID does not exist" containerID="f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.853627 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6"} 
err="failed to get container status \"f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6\": rpc error: code = NotFound desc = could not find container \"f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6\": container with ID starting with f4a784a107efece76ef07a2c90eefad797dede30f55932f51d0986b70d9004d6 not found: ID does not exist" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.853644 4818 scope.go:117] "RemoveContainer" containerID="c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.854962 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf\": container with ID starting with c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf not found: ID does not exist" containerID="c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.855002 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf"} err="failed to get container status \"c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf\": rpc error: code = NotFound desc = could not find container \"c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf\": container with ID starting with c524f2d2f57a59650a6d26dcd5f8a2686bdd91e97243a50afe4133e7d58200bf not found: ID does not exist" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.855021 4818 scope.go:117] "RemoveContainer" containerID="f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f" Sep 30 17:22:37 crc kubenswrapper[4818]: E0930 17:22:37.855319 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f\": container with ID starting with f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f not found: ID does not exist" containerID="f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.855346 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f"} err="failed to get container status \"f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f\": rpc error: code = NotFound desc = could not find container \"f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f\": container with ID starting with f82048a141c44bd44aa76e652fc257848ad0531c75812162a4097cd2b2ae4f9f not found: ID does not exist" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920578 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920637 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvdbw\" (UniqueName: \"kubernetes.io/projected/4dbfd897-a889-47cf-9056-fce87e407f2d-kube-api-access-jvdbw\") pod \"ceilometer-0\" (UID: 
\"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920661 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920689 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-scripts\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920743 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920763 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-config-data\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920809 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-run-httpd\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:37 crc kubenswrapper[4818]: I0930 17:22:37.920844 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-log-httpd\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.024867 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.024940 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvdbw\" (UniqueName: \"kubernetes.io/projected/4dbfd897-a889-47cf-9056-fce87e407f2d-kube-api-access-jvdbw\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.024974 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: 
I0930 17:22:38.025015 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-scripts\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.025113 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.025130 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-config-data\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.025191 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-run-httpd\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.025236 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-log-httpd\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.025562 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-log-httpd\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.025900 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-run-httpd\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.030467 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e006d103-d4af-48b0-8189-66893c558088" path="/var/lib/kubelet/pods/e006d103-d4af-48b0-8189-66893c558088/volumes" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.030496 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-scripts\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.030651 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.031530 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.031762 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-config-data\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.037378 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.053318 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvdbw\" (UniqueName: \"kubernetes.io/projected/4dbfd897-a889-47cf-9056-fce87e407f2d-kube-api-access-jvdbw\") pod \"ceilometer-0\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.147247 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.585632 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:22:38 crc kubenswrapper[4818]: W0930 17:22:38.587794 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4dbfd897_a889_47cf_9056_fce87e407f2d.slice/crio-e11d88b593cbc768b22c836b39168a541a249b7d4365e8e6b83704497acc535f WatchSource:0}: Error finding container e11d88b593cbc768b22c836b39168a541a249b7d4365e8e6b83704497acc535f: Status 404 returned error can't find the container with id e11d88b593cbc768b22c836b39168a541a249b7d4365e8e6b83704497acc535f Sep 30 17:22:38 crc kubenswrapper[4818]: I0930 17:22:38.753193 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerStarted","Data":"e11d88b593cbc768b22c836b39168a541a249b7d4365e8e6b83704497acc535f"} Sep 30 17:22:39 crc kubenswrapper[4818]: I0930 17:22:39.760419 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerStarted","Data":"caec266e4920f2759e00346224ed400221e81308fde32c621803f4d1f5d0afea"} Sep 30 17:22:40 crc kubenswrapper[4818]: I0930 17:22:40.770144 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerStarted","Data":"33bcaf272dfc8d336870b5616406c70a80acd0fa63f10bf0b7ec33958ff58a45"} Sep 30 17:22:41 crc kubenswrapper[4818]: I0930 17:22:41.784587 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerStarted","Data":"d99401ad2c1a519082aeed9c6c3cc270f112e719f8773e782c2be06017d536d4"} Sep 30 17:22:43 crc kubenswrapper[4818]: I0930 17:22:43.799770 4818 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerStarted","Data":"065b4a66aed7a888c011c419494c08fd551cd45cfb44d4119a548f770fe134aa"} Sep 30 17:22:43 crc kubenswrapper[4818]: I0930 17:22:43.801139 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:22:43 crc kubenswrapper[4818]: I0930 17:22:43.824212 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.688458669 podStartE2EDuration="6.824196389s" podCreationTimestamp="2025-09-30 17:22:37 +0000 UTC" firstStartedPulling="2025-09-30 17:22:38.589710469 +0000 UTC m=+1405.343982285" lastFinishedPulling="2025-09-30 17:22:42.725448189 +0000 UTC m=+1409.479720005" observedRunningTime="2025-09-30 17:22:43.82051897 +0000 UTC m=+1410.574790806" watchObservedRunningTime="2025-09-30 17:22:43.824196389 +0000 UTC m=+1410.578468205" Sep 30 17:23:08 crc kubenswrapper[4818]: I0930 17:23:08.160605 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.830044 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl"] Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.837102 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vw4zl"] Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.898318 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher490f-account-delete-8k4x4"] Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.899712 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.907621 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher490f-account-delete-8k4x4"] Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.913481 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.913715 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="a00533a7-cfa1-4094-9d55-8686d2f25d0f" containerName="watcher-applier" containerID="cri-o://40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e" gracePeriod=30 Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.919624 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.919826 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="47396e51-183d-4195-8a1e-2e10d824756a" containerName="watcher-decision-engine" containerID="cri-o://847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5" gracePeriod=30 Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.961901 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.962163 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-kuttl-api-log" containerID="cri-o://c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e" gracePeriod=30 Sep 30 17:23:12 crc kubenswrapper[4818]: I0930 17:23:12.962480 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-api" containerID="cri-o://36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6" gracePeriod=30 Sep 30 17:23:13 crc kubenswrapper[4818]: I0930 17:23:13.051303 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pxn2\" (UniqueName: \"kubernetes.io/projected/d57f16b3-345d-4601-b856-0748a34e1b8b-kube-api-access-9pxn2\") pod \"watcher490f-account-delete-8k4x4\" (UID: \"d57f16b3-345d-4601-b856-0748a34e1b8b\") " pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" Sep 30 17:23:13 crc kubenswrapper[4818]: I0930 17:23:13.110035 4818 generic.go:334] "Generic (PLEG): container finished" podID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerID="c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e" exitCode=143 Sep 30 17:23:13 crc kubenswrapper[4818]: I0930 17:23:13.110272 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"fd3c318e-092d-46e1-b097-932969b4ae2b","Type":"ContainerDied","Data":"c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e"} Sep 30 17:23:13 crc kubenswrapper[4818]: I0930 17:23:13.152418 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pxn2\" (UniqueName: 
\"kubernetes.io/projected/d57f16b3-345d-4601-b856-0748a34e1b8b-kube-api-access-9pxn2\") pod \"watcher490f-account-delete-8k4x4\" (UID: \"d57f16b3-345d-4601-b856-0748a34e1b8b\") " pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" Sep 30 17:23:13 crc kubenswrapper[4818]: I0930 17:23:13.198208 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pxn2\" (UniqueName: \"kubernetes.io/projected/d57f16b3-345d-4601-b856-0748a34e1b8b-kube-api-access-9pxn2\") pod \"watcher490f-account-delete-8k4x4\" (UID: \"d57f16b3-345d-4601-b856-0748a34e1b8b\") " pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" Sep 30 17:23:13 crc kubenswrapper[4818]: I0930 17:23:13.264279 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" Sep 30 17:23:13 crc kubenswrapper[4818]: I0930 17:23:13.760296 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher490f-account-delete-8k4x4"] Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.032253 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44d49fb6-4def-4dfc-8550-f2d219c0ef64" path="/var/lib/kubelet/pods/44d49fb6-4def-4dfc-8550-f2d219c0ef64/volumes" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.062154 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.112537 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": read tcp 10.217.0.2:52236->10.217.0.164:9322: read: connection reset by peer" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.112581 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": read tcp 10.217.0.2:52252->10.217.0.164:9322: read: connection reset by peer" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.132169 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" event={"ID":"d57f16b3-345d-4601-b856-0748a34e1b8b","Type":"ContainerStarted","Data":"91564a2a2a8d30169140af78aa7b5e5655280aadb6056c32531dbd202bb6e1fc"} Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.132217 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" event={"ID":"d57f16b3-345d-4601-b856-0748a34e1b8b","Type":"ContainerStarted","Data":"cc0b694aa601c8efbcec0994c379f4d8fa738434c946712c7c0e043ded258dc1"} Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.142620 4818 generic.go:334] "Generic (PLEG): container finished" podID="a00533a7-cfa1-4094-9d55-8686d2f25d0f" containerID="40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e" exitCode=0 Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.142686 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a00533a7-cfa1-4094-9d55-8686d2f25d0f","Type":"ContainerDied","Data":"40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e"} Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 
17:23:14.142719 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a00533a7-cfa1-4094-9d55-8686d2f25d0f","Type":"ContainerDied","Data":"9893e111d85bd67e3e1396093a4ad02b06273e902e92939e7b88c912c9a12a59"} Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.142741 4818 scope.go:117] "RemoveContainer" containerID="40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.142943 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.166177 4818 scope.go:117] "RemoveContainer" containerID="40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e" Sep 30 17:23:14 crc kubenswrapper[4818]: E0930 17:23:14.166729 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e\": container with ID starting with 40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e not found: ID does not exist" containerID="40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.166762 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e"} err="failed to get container status \"40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e\": rpc error: code = NotFound desc = could not find container \"40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e\": container with ID starting with 40c426cf503ed926b69ba35275027fda2735f43fcc3e51c85811599bf58b2a1e not found: ID does not exist" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.169060 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" podStartSLOduration=2.169049411 podStartE2EDuration="2.169049411s" podCreationTimestamp="2025-09-30 17:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:23:14.166193834 +0000 UTC m=+1440.920465650" watchObservedRunningTime="2025-09-30 17:23:14.169049411 +0000 UTC m=+1440.923321227" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.174170 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a00533a7-cfa1-4094-9d55-8686d2f25d0f-logs\") pod \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.174219 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8njzh\" (UniqueName: \"kubernetes.io/projected/a00533a7-cfa1-4094-9d55-8686d2f25d0f-kube-api-access-8njzh\") pod \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.174380 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-config-data\") pod \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " Sep 30 17:23:14 crc 
kubenswrapper[4818]: I0930 17:23:14.174401 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-cert-memcached-mtls\") pod \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.174436 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-combined-ca-bundle\") pod \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\" (UID: \"a00533a7-cfa1-4094-9d55-8686d2f25d0f\") " Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.176054 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a00533a7-cfa1-4094-9d55-8686d2f25d0f-logs" (OuterVolumeSpecName: "logs") pod "a00533a7-cfa1-4094-9d55-8686d2f25d0f" (UID: "a00533a7-cfa1-4094-9d55-8686d2f25d0f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.201465 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a00533a7-cfa1-4094-9d55-8686d2f25d0f-kube-api-access-8njzh" (OuterVolumeSpecName: "kube-api-access-8njzh") pod "a00533a7-cfa1-4094-9d55-8686d2f25d0f" (UID: "a00533a7-cfa1-4094-9d55-8686d2f25d0f"). InnerVolumeSpecName "kube-api-access-8njzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.206730 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a00533a7-cfa1-4094-9d55-8686d2f25d0f" (UID: "a00533a7-cfa1-4094-9d55-8686d2f25d0f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.230756 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-config-data" (OuterVolumeSpecName: "config-data") pod "a00533a7-cfa1-4094-9d55-8686d2f25d0f" (UID: "a00533a7-cfa1-4094-9d55-8686d2f25d0f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.277872 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.277898 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.277908 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a00533a7-cfa1-4094-9d55-8686d2f25d0f-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.277916 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8njzh\" (UniqueName: \"kubernetes.io/projected/a00533a7-cfa1-4094-9d55-8686d2f25d0f-kube-api-access-8njzh\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.280757 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "a00533a7-cfa1-4094-9d55-8686d2f25d0f" (UID: "a00533a7-cfa1-4094-9d55-8686d2f25d0f"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.379156 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a00533a7-cfa1-4094-9d55-8686d2f25d0f-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.474002 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.479286 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:23:14 crc kubenswrapper[4818]: I0930 17:23:14.959669 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.101177 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-combined-ca-bundle\") pod \"fd3c318e-092d-46e1-b097-932969b4ae2b\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.101619 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwgvf\" (UniqueName: \"kubernetes.io/projected/fd3c318e-092d-46e1-b097-932969b4ae2b-kube-api-access-mwgvf\") pod \"fd3c318e-092d-46e1-b097-932969b4ae2b\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.101703 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-cert-memcached-mtls\") pod \"fd3c318e-092d-46e1-b097-932969b4ae2b\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.101786 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-custom-prometheus-ca\") pod \"fd3c318e-092d-46e1-b097-932969b4ae2b\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.101828 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-config-data\") pod \"fd3c318e-092d-46e1-b097-932969b4ae2b\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.101912 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd3c318e-092d-46e1-b097-932969b4ae2b-logs\") pod \"fd3c318e-092d-46e1-b097-932969b4ae2b\" (UID: \"fd3c318e-092d-46e1-b097-932969b4ae2b\") " Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.102530 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd3c318e-092d-46e1-b097-932969b4ae2b-logs" (OuterVolumeSpecName: "logs") pod "fd3c318e-092d-46e1-b097-932969b4ae2b" (UID: "fd3c318e-092d-46e1-b097-932969b4ae2b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.108386 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd3c318e-092d-46e1-b097-932969b4ae2b-kube-api-access-mwgvf" (OuterVolumeSpecName: "kube-api-access-mwgvf") pod "fd3c318e-092d-46e1-b097-932969b4ae2b" (UID: "fd3c318e-092d-46e1-b097-932969b4ae2b"). InnerVolumeSpecName "kube-api-access-mwgvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.129393 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd3c318e-092d-46e1-b097-932969b4ae2b" (UID: "fd3c318e-092d-46e1-b097-932969b4ae2b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.149531 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "fd3c318e-092d-46e1-b097-932969b4ae2b" (UID: "fd3c318e-092d-46e1-b097-932969b4ae2b"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.152514 4818 generic.go:334] "Generic (PLEG): container finished" podID="d57f16b3-345d-4601-b856-0748a34e1b8b" containerID="91564a2a2a8d30169140af78aa7b5e5655280aadb6056c32531dbd202bb6e1fc" exitCode=0 Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.152578 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" event={"ID":"d57f16b3-345d-4601-b856-0748a34e1b8b","Type":"ContainerDied","Data":"91564a2a2a8d30169140af78aa7b5e5655280aadb6056c32531dbd202bb6e1fc"} Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.156450 4818 generic.go:334] "Generic (PLEG): container finished" podID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerID="36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6" exitCode=0 Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.156493 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"fd3c318e-092d-46e1-b097-932969b4ae2b","Type":"ContainerDied","Data":"36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6"} Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.156521 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"fd3c318e-092d-46e1-b097-932969b4ae2b","Type":"ContainerDied","Data":"4b5df590e68a9d3460cf50c911fd9a87939b761a1ee255a938ef200659025273"} Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.156541 4818 scope.go:117] "RemoveContainer" containerID="36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.156645 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.168479 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-config-data" (OuterVolumeSpecName: "config-data") pod "fd3c318e-092d-46e1-b097-932969b4ae2b" (UID: "fd3c318e-092d-46e1-b097-932969b4ae2b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.181503 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "fd3c318e-092d-46e1-b097-932969b4ae2b" (UID: "fd3c318e-092d-46e1-b097-932969b4ae2b"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.203703 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.203748 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.203761 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.203772 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd3c318e-092d-46e1-b097-932969b4ae2b-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.203780 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3c318e-092d-46e1-b097-932969b4ae2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.203791 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwgvf\" (UniqueName: \"kubernetes.io/projected/fd3c318e-092d-46e1-b097-932969b4ae2b-kube-api-access-mwgvf\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.249081 4818 scope.go:117] "RemoveContainer" containerID="c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.266115 4818 scope.go:117] "RemoveContainer" containerID="36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6" Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.266479 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6\": container with ID starting with 36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6 not found: ID does not exist" containerID="36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.266531 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6"} err="failed to get container status \"36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6\": rpc error: code = NotFound desc = could not find container \"36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6\": container with ID starting with 36be5b46fb4f67938e1b9b16ee80097b4e12188b9f76d9018fb8daadaa54eae6 not found: ID does not exist" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.266570 4818 scope.go:117] "RemoveContainer" containerID="c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e" Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.267004 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e\": container with ID starting with 
c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e not found: ID does not exist" containerID="c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.267049 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e"} err="failed to get container status \"c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e\": rpc error: code = NotFound desc = could not find container \"c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e\": container with ID starting with c340a62152cea4f8ce7d8684ec9b5a39ce40e8d5099c327e3cadacfcf16bae5e not found: ID does not exist" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.481747 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.487755 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.762846 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.766623 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.768482 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.768530 4818 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="47396e51-183d-4195-8a1e-2e10d824756a" containerName="watcher-decision-engine" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.853135 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.853398 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-central-agent" containerID="cri-o://caec266e4920f2759e00346224ed400221e81308fde32c621803f4d1f5d0afea" gracePeriod=30 Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.853495 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="sg-core" 
containerID="cri-o://d99401ad2c1a519082aeed9c6c3cc270f112e719f8773e782c2be06017d536d4" gracePeriod=30 Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.853579 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-notification-agent" containerID="cri-o://33bcaf272dfc8d336870b5616406c70a80acd0fa63f10bf0b7ec33958ff58a45" gracePeriod=30 Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.853545 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="proxy-httpd" containerID="cri-o://065b4a66aed7a888c011c419494c08fd551cd45cfb44d4119a548f770fe134aa" gracePeriod=30 Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.910419 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tspg2"] Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.910975 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a00533a7-cfa1-4094-9d55-8686d2f25d0f" containerName="watcher-applier" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.911004 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a00533a7-cfa1-4094-9d55-8686d2f25d0f" containerName="watcher-applier" Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.911037 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-api" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.911048 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-api" Sep 30 17:23:15 crc kubenswrapper[4818]: E0930 17:23:15.911085 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-kuttl-api-log" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.911098 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-kuttl-api-log" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.911332 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="a00533a7-cfa1-4094-9d55-8686d2f25d0f" containerName="watcher-applier" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.911362 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-api" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.911382 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" containerName="watcher-kuttl-api-log" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.913254 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:15 crc kubenswrapper[4818]: I0930 17:23:15.942268 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tspg2"] Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.015963 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvb7j\" (UniqueName: \"kubernetes.io/projected/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-kube-api-access-nvb7j\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.017317 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-catalog-content\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.017528 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-utilities\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.034363 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a00533a7-cfa1-4094-9d55-8686d2f25d0f" path="/var/lib/kubelet/pods/a00533a7-cfa1-4094-9d55-8686d2f25d0f/volumes" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.034909 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd3c318e-092d-46e1-b097-932969b4ae2b" path="/var/lib/kubelet/pods/fd3c318e-092d-46e1-b097-932969b4ae2b/volumes" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.119648 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-catalog-content\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.119725 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-utilities\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.119808 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvb7j\" (UniqueName: \"kubernetes.io/projected/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-kube-api-access-nvb7j\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.120446 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-utilities\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2" 
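[Annotation] The three failed ExecSync calls above are the kubelet retrying the exec readiness probe of watcher-decision-engine while its container is already stopping; the prober then records the failure. A minimal standalone Go sketch of the same check, reusing the exact argv quoted in the log (illustrative only, not kubelet code; assumes a local /usr/bin/pgrep):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Same argv the probe execs (copied from the log): -f matches the full
	// command line, -r DRST restricts matches to processes in run states
	// D, R, S or T.
	cmd := exec.Command("/usr/bin/pgrep", "-f", "-r", "DRST", "watcher-decision-engine")
	out, err := cmd.Output()
	if err != nil {
		// pgrep exits non-zero when nothing matches; the kubelet counts a
		// non-zero exit (or a failed ExecSync, as above) as a probe failure.
		fmt.Println("readiness check failed:", err)
		return
	}
	fmt.Printf("readiness check passed; matching PIDs:\n%s", out)
}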
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.120592 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-catalog-content\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2"
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.145007 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvb7j\" (UniqueName: \"kubernetes.io/projected/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-kube-api-access-nvb7j\") pod \"redhat-marketplace-tspg2\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") " pod="openshift-marketplace/redhat-marketplace-tspg2"
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.207996 4818 generic.go:334] "Generic (PLEG): container finished" podID="47396e51-183d-4195-8a1e-2e10d824756a" containerID="847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5" exitCode=0
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.208115 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"47396e51-183d-4195-8a1e-2e10d824756a","Type":"ContainerDied","Data":"847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5"}
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.212020 4818 generic.go:334] "Generic (PLEG): container finished" podID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerID="065b4a66aed7a888c011c419494c08fd551cd45cfb44d4119a548f770fe134aa" exitCode=0
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.212048 4818 generic.go:334] "Generic (PLEG): container finished" podID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerID="d99401ad2c1a519082aeed9c6c3cc270f112e719f8773e782c2be06017d536d4" exitCode=2
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.212113 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerDied","Data":"065b4a66aed7a888c011c419494c08fd551cd45cfb44d4119a548f770fe134aa"}
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.212146 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerDied","Data":"d99401ad2c1a519082aeed9c6c3cc270f112e719f8773e782c2be06017d536d4"}
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.290887 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tspg2"
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.427763 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.532311 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-custom-prometheus-ca\") pod \"47396e51-183d-4195-8a1e-2e10d824756a\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") "
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.532462 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9dlm\" (UniqueName: \"kubernetes.io/projected/47396e51-183d-4195-8a1e-2e10d824756a-kube-api-access-n9dlm\") pod \"47396e51-183d-4195-8a1e-2e10d824756a\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") "
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.532500 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-cert-memcached-mtls\") pod \"47396e51-183d-4195-8a1e-2e10d824756a\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") "
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.532543 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-combined-ca-bundle\") pod \"47396e51-183d-4195-8a1e-2e10d824756a\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") "
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.532604 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-config-data\") pod \"47396e51-183d-4195-8a1e-2e10d824756a\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") "
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.532630 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396e51-183d-4195-8a1e-2e10d824756a-logs\") pod \"47396e51-183d-4195-8a1e-2e10d824756a\" (UID: \"47396e51-183d-4195-8a1e-2e10d824756a\") "
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.533603 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47396e51-183d-4195-8a1e-2e10d824756a-logs" (OuterVolumeSpecName: "logs") pod "47396e51-183d-4195-8a1e-2e10d824756a" (UID: "47396e51-183d-4195-8a1e-2e10d824756a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.537432 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47396e51-183d-4195-8a1e-2e10d824756a-kube-api-access-n9dlm" (OuterVolumeSpecName: "kube-api-access-n9dlm") pod "47396e51-183d-4195-8a1e-2e10d824756a" (UID: "47396e51-183d-4195-8a1e-2e10d824756a"). InnerVolumeSpecName "kube-api-access-n9dlm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.558130 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "47396e51-183d-4195-8a1e-2e10d824756a" (UID: "47396e51-183d-4195-8a1e-2e10d824756a"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.576663 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47396e51-183d-4195-8a1e-2e10d824756a" (UID: "47396e51-183d-4195-8a1e-2e10d824756a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.596184 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-config-data" (OuterVolumeSpecName: "config-data") pod "47396e51-183d-4195-8a1e-2e10d824756a" (UID: "47396e51-183d-4195-8a1e-2e10d824756a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.622087 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "47396e51-183d-4195-8a1e-2e10d824756a" (UID: "47396e51-183d-4195-8a1e-2e10d824756a"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.631655 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4"
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.634698 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.634728 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9dlm\" (UniqueName: \"kubernetes.io/projected/47396e51-183d-4195-8a1e-2e10d824756a-kube-api-access-n9dlm\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.634739 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.634750 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.634761 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47396e51-183d-4195-8a1e-2e10d824756a-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.634772 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396e51-183d-4195-8a1e-2e10d824756a-logs\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.735637 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pxn2\" (UniqueName: \"kubernetes.io/projected/d57f16b3-345d-4601-b856-0748a34e1b8b-kube-api-access-9pxn2\") pod \"d57f16b3-345d-4601-b856-0748a34e1b8b\" (UID: \"d57f16b3-345d-4601-b856-0748a34e1b8b\") "
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.739127 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d57f16b3-345d-4601-b856-0748a34e1b8b-kube-api-access-9pxn2" (OuterVolumeSpecName: "kube-api-access-9pxn2") pod "d57f16b3-345d-4601-b856-0748a34e1b8b" (UID: "d57f16b3-345d-4601-b856-0748a34e1b8b"). InnerVolumeSpecName "kube-api-access-9pxn2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.837379 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pxn2\" (UniqueName: \"kubernetes.io/projected/d57f16b3-345d-4601-b856-0748a34e1b8b-kube-api-access-9pxn2\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:16 crc kubenswrapper[4818]: I0930 17:23:16.927839 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tspg2"]
Sep 30 17:23:16 crc kubenswrapper[4818]: W0930 17:23:16.967374 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab36dfb2_948b_415b_8aa4_f6a80e40b22c.slice/crio-7c3946943eae44abcf3dbce802313de3d21e9e8060dbabd7d8bc57929db4becb WatchSource:0}: Error finding container 7c3946943eae44abcf3dbce802313de3d21e9e8060dbabd7d8bc57929db4becb: Status 404 returned error can't find the container with id 7c3946943eae44abcf3dbce802313de3d21e9e8060dbabd7d8bc57929db4becb
Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.225963 4818 generic.go:334] "Generic (PLEG): container finished" podID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerID="33bcaf272dfc8d336870b5616406c70a80acd0fa63f10bf0b7ec33958ff58a45" exitCode=0
Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.226221 4818 generic.go:334] "Generic (PLEG): container finished" podID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerID="caec266e4920f2759e00346224ed400221e81308fde32c621803f4d1f5d0afea" exitCode=0
Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.226299 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerDied","Data":"33bcaf272dfc8d336870b5616406c70a80acd0fa63f10bf0b7ec33958ff58a45"}
Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.226331 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerDied","Data":"caec266e4920f2759e00346224ed400221e81308fde32c621803f4d1f5d0afea"}
Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.230428 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tspg2" event={"ID":"ab36dfb2-948b-415b-8aa4-f6a80e40b22c","Type":"ContainerStarted","Data":"c5ce4b7a86228d4bafc64dfd04a57d91c359195652a98639dbc63980ee5dad2d"}
Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.230475 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tspg2" event={"ID":"ab36dfb2-948b-415b-8aa4-f6a80e40b22c","Type":"ContainerStarted","Data":"7c3946943eae44abcf3dbce802313de3d21e9e8060dbabd7d8bc57929db4becb"}
Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.233151 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"47396e51-183d-4195-8a1e-2e10d824756a","Type":"ContainerDied","Data":"3a440443ea61b3e5023564306a0a9b20e917c78bcc77137e676bbfd701f35d27"}
event={"ID":"47396e51-183d-4195-8a1e-2e10d824756a","Type":"ContainerDied","Data":"3a440443ea61b3e5023564306a0a9b20e917c78bcc77137e676bbfd701f35d27"} Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.233200 4818 scope.go:117] "RemoveContainer" containerID="847f036e7275e026ba12e534ce2feec414295edb5886b1f4eb6d354cdfef5ba5" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.233364 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.237166 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" event={"ID":"d57f16b3-345d-4601-b856-0748a34e1b8b","Type":"ContainerDied","Data":"cc0b694aa601c8efbcec0994c379f4d8fa738434c946712c7c0e043ded258dc1"} Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.237200 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc0b694aa601c8efbcec0994c379f4d8fa738434c946712c7c0e043ded258dc1" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.237211 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher490f-account-delete-8k4x4" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.349592 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.355715 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.395764 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.445191 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-log-httpd\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.445322 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-config-data\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.445347 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-run-httpd\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.445589 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.445739 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.445980 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-scripts\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.446015 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-sg-core-conf-yaml\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.446057 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvdbw\" (UniqueName: \"kubernetes.io/projected/4dbfd897-a889-47cf-9056-fce87e407f2d-kube-api-access-jvdbw\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.446135 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-combined-ca-bundle\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.446162 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-ceilometer-tls-certs\") pod \"4dbfd897-a889-47cf-9056-fce87e407f2d\" (UID: \"4dbfd897-a889-47cf-9056-fce87e407f2d\") " Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.446461 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.446476 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4dbfd897-a889-47cf-9056-fce87e407f2d-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.456200 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dbfd897-a889-47cf-9056-fce87e407f2d-kube-api-access-jvdbw" (OuterVolumeSpecName: "kube-api-access-jvdbw") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "kube-api-access-jvdbw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.471090 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-scripts" (OuterVolumeSpecName: "scripts") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.522505 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.528789 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.548076 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.548101 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.548110 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvdbw\" (UniqueName: \"kubernetes.io/projected/4dbfd897-a889-47cf-9056-fce87e407f2d-kube-api-access-jvdbw\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.548118 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.563972 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.587644 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-config-data" (OuterVolumeSpecName: "config-data") pod "4dbfd897-a889-47cf-9056-fce87e407f2d" (UID: "4dbfd897-a889-47cf-9056-fce87e407f2d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.649803 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.649847 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dbfd897-a889-47cf-9056-fce87e407f2d-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.909668 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q9mdl"] Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.920418 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q9mdl"] Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.933186 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-490f-account-create-69fj5"] Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.942012 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher490f-account-delete-8k4x4"] Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.948969 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-490f-account-create-69fj5"] Sep 30 17:23:17 crc kubenswrapper[4818]: I0930 17:23:17.955252 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher490f-account-delete-8k4x4"] Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.030362 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="107dc282-5a61-4890-ba8b-d3251cb7edab" path="/var/lib/kubelet/pods/107dc282-5a61-4890-ba8b-d3251cb7edab/volumes" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.031388 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47396e51-183d-4195-8a1e-2e10d824756a" path="/var/lib/kubelet/pods/47396e51-183d-4195-8a1e-2e10d824756a/volumes" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.031963 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b760ae74-7c53-479c-ba5b-32eded8b3f72" path="/var/lib/kubelet/pods/b760ae74-7c53-479c-ba5b-32eded8b3f72/volumes" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.032461 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d57f16b3-345d-4601-b856-0748a34e1b8b" path="/var/lib/kubelet/pods/d57f16b3-345d-4601-b856-0748a34e1b8b/volumes" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.249982 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.249989 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"4dbfd897-a889-47cf-9056-fce87e407f2d","Type":"ContainerDied","Data":"e11d88b593cbc768b22c836b39168a541a249b7d4365e8e6b83704497acc535f"}
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.250904 4818 scope.go:117] "RemoveContainer" containerID="065b4a66aed7a888c011c419494c08fd551cd45cfb44d4119a548f770fe134aa"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.252529 4818 generic.go:334] "Generic (PLEG): container finished" podID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerID="c5ce4b7a86228d4bafc64dfd04a57d91c359195652a98639dbc63980ee5dad2d" exitCode=0
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.252607 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tspg2" event={"ID":"ab36dfb2-948b-415b-8aa4-f6a80e40b22c","Type":"ContainerDied","Data":"c5ce4b7a86228d4bafc64dfd04a57d91c359195652a98639dbc63980ee5dad2d"}
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.278904 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.293735 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.305256 4818 scope.go:117] "RemoveContainer" containerID="d99401ad2c1a519082aeed9c6c3cc270f112e719f8773e782c2be06017d536d4"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.315374 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:18 crc kubenswrapper[4818]: E0930 17:23:18.316299 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d57f16b3-345d-4601-b856-0748a34e1b8b" containerName="mariadb-account-delete"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316316 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d57f16b3-345d-4601-b856-0748a34e1b8b" containerName="mariadb-account-delete"
Sep 30 17:23:18 crc kubenswrapper[4818]: E0930 17:23:18.316333 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="sg-core"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316362 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="sg-core"
Sep 30 17:23:18 crc kubenswrapper[4818]: E0930 17:23:18.316384 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="proxy-httpd"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316391 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="proxy-httpd"
Sep 30 17:23:18 crc kubenswrapper[4818]: E0930 17:23:18.316406 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-central-agent"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316411 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-central-agent"
Sep 30 17:23:18 crc kubenswrapper[4818]: E0930 17:23:18.316441 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-notification-agent"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316448 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-notification-agent"
Sep 30 17:23:18 crc kubenswrapper[4818]: E0930 17:23:18.316457 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47396e51-183d-4195-8a1e-2e10d824756a" containerName="watcher-decision-engine"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316464 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="47396e51-183d-4195-8a1e-2e10d824756a" containerName="watcher-decision-engine"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316682 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-central-agent"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316694 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="ceilometer-notification-agent"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316704 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="proxy-httpd"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316722 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="47396e51-183d-4195-8a1e-2e10d824756a" containerName="watcher-decision-engine"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316729 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d57f16b3-345d-4601-b856-0748a34e1b8b" containerName="mariadb-account-delete"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.316763 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" containerName="sg-core"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.318397 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.322351 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.347164 4818 scope.go:117] "RemoveContainer" containerID="33bcaf272dfc8d336870b5616406c70a80acd0fa63f10bf0b7ec33958ff58a45"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.349207 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.349431 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363586 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363644 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-run-httpd\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363675 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363776 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcvtr\" (UniqueName: \"kubernetes.io/projected/c22dc5f8-114e-4b46-9f08-4936a8972056-kube-api-access-hcvtr\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363822 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-log-httpd\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363864 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363901 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-scripts\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.363963 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-config-data\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.378129 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.378285 4818 scope.go:117] "RemoveContainer" containerID="caec266e4920f2759e00346224ed400221e81308fde32c621803f4d1f5d0afea"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.464990 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcvtr\" (UniqueName: \"kubernetes.io/projected/c22dc5f8-114e-4b46-9f08-4936a8972056-kube-api-access-hcvtr\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465094 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-log-httpd\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465165 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465200 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-scripts\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465227 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-config-data\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465406 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465475 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-run-httpd\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465551 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0"
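[Annotation] Every kubenswrapper record here carries the klog header, e.g. "I0930 17:23:18.363963 4818 reconciler_common.go:245]": severity letter, MMDD date, wall-clock time, PID, and source file:line. A small Go sketch that splits that header out of a journal line (the regexp and field order are assumptions inferred from the lines above, not a published grammar):

package main

import (
	"fmt"
	"regexp"
)

// klogHeader matches the glog/klog prefix visible in every record above:
// severity, MMDD, time, PID, and source location, then the message.
var klogHeader = regexp.MustCompile(
	`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w./-]+:\d+)\] (.*)`)

func main() {
	line := `I0930 17:23:18.363963 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume ..."`
	m := klogHeader.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("not a klog line")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s\nmsg=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}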
\"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.465896 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-log-httpd\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.466034 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-run-httpd\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.470251 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.473333 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-scripts\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.473391 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.477267 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-config-data\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.480276 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcvtr\" (UniqueName: \"kubernetes.io/projected/c22dc5f8-114e-4b46-9f08-4936a8972056-kube-api-access-hcvtr\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.482194 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:18 crc kubenswrapper[4818]: I0930 17:23:18.660595 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.103337 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:23:19 crc kubenswrapper[4818]: W0930 17:23:19.107528 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc22dc5f8_114e_4b46_9f08_4936a8972056.slice/crio-c53e01d6bcc03047bc5fca649cb6c6a465b88b9895a7e75e844f8991290598d8 WatchSource:0}: Error finding container c53e01d6bcc03047bc5fca649cb6c6a465b88b9895a7e75e844f8991290598d8: Status 404 returned error can't find the container with id c53e01d6bcc03047bc5fca649cb6c6a465b88b9895a7e75e844f8991290598d8 Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.272599 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerStarted","Data":"c53e01d6bcc03047bc5fca649cb6c6a465b88b9895a7e75e844f8991290598d8"} Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.700901 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-kxfgv"] Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.702224 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-kxfgv" Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.712899 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-kxfgv"] Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.789546 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqjgr\" (UniqueName: \"kubernetes.io/projected/509ba697-f02e-4e73-b634-ff5b48e58f77-kube-api-access-cqjgr\") pod \"watcher-db-create-kxfgv\" (UID: \"509ba697-f02e-4e73-b634-ff5b48e58f77\") " pod="watcher-kuttl-default/watcher-db-create-kxfgv" Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.891035 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqjgr\" (UniqueName: \"kubernetes.io/projected/509ba697-f02e-4e73-b634-ff5b48e58f77-kube-api-access-cqjgr\") pod \"watcher-db-create-kxfgv\" (UID: \"509ba697-f02e-4e73-b634-ff5b48e58f77\") " pod="watcher-kuttl-default/watcher-db-create-kxfgv" Sep 30 17:23:19 crc kubenswrapper[4818]: I0930 17:23:19.910711 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqjgr\" (UniqueName: \"kubernetes.io/projected/509ba697-f02e-4e73-b634-ff5b48e58f77-kube-api-access-cqjgr\") pod \"watcher-db-create-kxfgv\" (UID: \"509ba697-f02e-4e73-b634-ff5b48e58f77\") " pod="watcher-kuttl-default/watcher-db-create-kxfgv" Sep 30 17:23:20 crc kubenswrapper[4818]: I0930 17:23:20.022983 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-kxfgv" Sep 30 17:23:20 crc kubenswrapper[4818]: I0930 17:23:20.033629 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dbfd897-a889-47cf-9056-fce87e407f2d" path="/var/lib/kubelet/pods/4dbfd897-a889-47cf-9056-fce87e407f2d/volumes" Sep 30 17:23:20 crc kubenswrapper[4818]: I0930 17:23:20.309361 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerStarted","Data":"2b196f6dd49e3fc7f4e924af70e11abd1ba8467efbe68230658a84c2a7e63042"} Sep 30 17:23:20 crc kubenswrapper[4818]: I0930 17:23:20.317050 4818 generic.go:334] "Generic (PLEG): container finished" podID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerID="45d3f53079ba7eddec2c293003245f40b37f1dfdf8c448ced3612e8ede0010ba" exitCode=0 Sep 30 17:23:20 crc kubenswrapper[4818]: I0930 17:23:20.317104 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tspg2" event={"ID":"ab36dfb2-948b-415b-8aa4-f6a80e40b22c","Type":"ContainerDied","Data":"45d3f53079ba7eddec2c293003245f40b37f1dfdf8c448ced3612e8ede0010ba"} Sep 30 17:23:20 crc kubenswrapper[4818]: I0930 17:23:20.586434 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-kxfgv"] Sep 30 17:23:20 crc kubenswrapper[4818]: W0930 17:23:20.633070 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod509ba697_f02e_4e73_b634_ff5b48e58f77.slice/crio-8b35eea21408ce36adf5499c122d0fefc6741b991adc8233c0c6e743d3261416 WatchSource:0}: Error finding container 8b35eea21408ce36adf5499c122d0fefc6741b991adc8233c0c6e743d3261416: Status 404 returned error can't find the container with id 8b35eea21408ce36adf5499c122d0fefc6741b991adc8233c0c6e743d3261416 Sep 30 17:23:21 crc kubenswrapper[4818]: I0930 17:23:21.325416 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tspg2" event={"ID":"ab36dfb2-948b-415b-8aa4-f6a80e40b22c","Type":"ContainerStarted","Data":"04a7abfa7afd9d74dc59368a3af64149140e061912f167407d3db7ccde19cff2"} Sep 30 17:23:21 crc kubenswrapper[4818]: I0930 17:23:21.328317 4818 generic.go:334] "Generic (PLEG): container finished" podID="509ba697-f02e-4e73-b634-ff5b48e58f77" containerID="230457141ad594e100eab2a73d72a0e9142c7dcc67320de3f47e61b5e280c4a1" exitCode=0 Sep 30 17:23:21 crc kubenswrapper[4818]: I0930 17:23:21.328383 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-kxfgv" event={"ID":"509ba697-f02e-4e73-b634-ff5b48e58f77","Type":"ContainerDied","Data":"230457141ad594e100eab2a73d72a0e9142c7dcc67320de3f47e61b5e280c4a1"} Sep 30 17:23:21 crc kubenswrapper[4818]: I0930 17:23:21.328407 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-kxfgv" event={"ID":"509ba697-f02e-4e73-b634-ff5b48e58f77","Type":"ContainerStarted","Data":"8b35eea21408ce36adf5499c122d0fefc6741b991adc8233c0c6e743d3261416"} Sep 30 17:23:21 crc kubenswrapper[4818]: I0930 17:23:21.330302 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerStarted","Data":"c5f33e27139ecfa8fa90423cf96a2dc5ef8e86a85df7733dbbbdbe455ed4e507"} Sep 30 17:23:21 crc kubenswrapper[4818]: I0930 17:23:21.348941 4818 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tspg2" podStartSLOduration=3.512764664 podStartE2EDuration="6.348909755s" podCreationTimestamp="2025-09-30 17:23:15 +0000 UTC" firstStartedPulling="2025-09-30 17:23:18.254840622 +0000 UTC m=+1445.009112458" lastFinishedPulling="2025-09-30 17:23:21.090985713 +0000 UTC m=+1447.845257549" observedRunningTime="2025-09-30 17:23:21.344538597 +0000 UTC m=+1448.098810413" watchObservedRunningTime="2025-09-30 17:23:21.348909755 +0000 UTC m=+1448.103181571" Sep 30 17:23:22 crc kubenswrapper[4818]: I0930 17:23:22.342982 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerStarted","Data":"3bbfc08ad1afcac147976aa3ccd8b1989088413246d3ba8aced67515a3882efc"} Sep 30 17:23:22 crc kubenswrapper[4818]: I0930 17:23:22.596332 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:23:22 crc kubenswrapper[4818]: I0930 17:23:22.596390 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:23:22 crc kubenswrapper[4818]: I0930 17:23:22.782467 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-kxfgv" Sep 30 17:23:22 crc kubenswrapper[4818]: I0930 17:23:22.844019 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqjgr\" (UniqueName: \"kubernetes.io/projected/509ba697-f02e-4e73-b634-ff5b48e58f77-kube-api-access-cqjgr\") pod \"509ba697-f02e-4e73-b634-ff5b48e58f77\" (UID: \"509ba697-f02e-4e73-b634-ff5b48e58f77\") " Sep 30 17:23:22 crc kubenswrapper[4818]: I0930 17:23:22.854241 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/509ba697-f02e-4e73-b634-ff5b48e58f77-kube-api-access-cqjgr" (OuterVolumeSpecName: "kube-api-access-cqjgr") pod "509ba697-f02e-4e73-b634-ff5b48e58f77" (UID: "509ba697-f02e-4e73-b634-ff5b48e58f77"). InnerVolumeSpecName "kube-api-access-cqjgr". 
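[Annotation] The "Observed pod startup duration" record above is internally consistent: podStartE2EDuration (6.348909755s) spans podCreationTimestamp to observedRunningTime, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A quick arithmetic check in Go, using only the timestamps quoted in the record:

package main

import "fmt"

func main() {
	// Seconds-within-minute values quoted verbatim in the record above.
	const (
		e2e              = 6.348909755  // podStartE2EDuration
		firstStartedPull = 18.254840622 // firstStartedPulling, 17:23:18...
		lastFinishedPull = 21.090985713 // lastFinishedPulling, 17:23:21...
	)
	pull := lastFinishedPull - firstStartedPull // 2.836145091s spent pulling the image
	// Prints 3.512764664, matching podStartSLOduration in the record.
	fmt.Printf("SLO duration = %.9f\n", e2e-pull)
}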
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:23:22 crc kubenswrapper[4818]: I0930 17:23:22.946292 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqjgr\" (UniqueName: \"kubernetes.io/projected/509ba697-f02e-4e73-b634-ff5b48e58f77-kube-api-access-cqjgr\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.316307 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-56967"] Sep 30 17:23:23 crc kubenswrapper[4818]: E0930 17:23:23.316810 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="509ba697-f02e-4e73-b634-ff5b48e58f77" containerName="mariadb-database-create" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.316839 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="509ba697-f02e-4e73-b634-ff5b48e58f77" containerName="mariadb-database-create" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.317154 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="509ba697-f02e-4e73-b634-ff5b48e58f77" containerName="mariadb-database-create" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.319182 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.323284 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-56967"] Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.353500 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-utilities\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.353555 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5jws\" (UniqueName: \"kubernetes.io/projected/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-kube-api-access-d5jws\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.353589 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-catalog-content\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.356784 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-kxfgv" event={"ID":"509ba697-f02e-4e73-b634-ff5b48e58f77","Type":"ContainerDied","Data":"8b35eea21408ce36adf5499c122d0fefc6741b991adc8233c0c6e743d3261416"} Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.356828 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b35eea21408ce36adf5499c122d0fefc6741b991adc8233c0c6e743d3261416" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.356885 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-kxfgv" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.454685 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-utilities\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.454753 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5jws\" (UniqueName: \"kubernetes.io/projected/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-kube-api-access-d5jws\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.454778 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-catalog-content\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.455259 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-catalog-content\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.455480 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-utilities\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.473312 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5jws\" (UniqueName: \"kubernetes.io/projected/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-kube-api-access-d5jws\") pod \"community-operators-56967\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") " pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:23 crc kubenswrapper[4818]: I0930 17:23:23.742386 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-56967" Sep 30 17:23:24 crc kubenswrapper[4818]: W0930 17:23:24.257679 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1577ac1d_bc53_4505_a4c0_9e64a16fca2b.slice/crio-db4d7f7113311663e08727dfa2bf13bba638789a8e4fe4466a16e68695fc1a5c WatchSource:0}: Error finding container db4d7f7113311663e08727dfa2bf13bba638789a8e4fe4466a16e68695fc1a5c: Status 404 returned error can't find the container with id db4d7f7113311663e08727dfa2bf13bba638789a8e4fe4466a16e68695fc1a5c Sep 30 17:23:24 crc kubenswrapper[4818]: I0930 17:23:24.268161 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-56967"] Sep 30 17:23:24 crc kubenswrapper[4818]: I0930 17:23:24.371185 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerStarted","Data":"c677e548b2ae99b033af80d3b3c4ba0530ec48d94b6729e401c10604f452db27"} Sep 30 17:23:24 crc kubenswrapper[4818]: I0930 17:23:24.371276 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:24 crc kubenswrapper[4818]: I0930 17:23:24.372714 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56967" event={"ID":"1577ac1d-bc53-4505-a4c0-9e64a16fca2b","Type":"ContainerStarted","Data":"db4d7f7113311663e08727dfa2bf13bba638789a8e4fe4466a16e68695fc1a5c"} Sep 30 17:23:24 crc kubenswrapper[4818]: I0930 17:23:24.399950 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.178475517 podStartE2EDuration="6.399919365s" podCreationTimestamp="2025-09-30 17:23:18 +0000 UTC" firstStartedPulling="2025-09-30 17:23:19.111044265 +0000 UTC m=+1445.865316081" lastFinishedPulling="2025-09-30 17:23:23.332488103 +0000 UTC m=+1450.086759929" observedRunningTime="2025-09-30 17:23:24.399143494 +0000 UTC m=+1451.153415320" watchObservedRunningTime="2025-09-30 17:23:24.399919365 +0000 UTC m=+1451.154191181" Sep 30 17:23:25 crc kubenswrapper[4818]: I0930 17:23:25.389403 4818 generic.go:334] "Generic (PLEG): container finished" podID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerID="7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1" exitCode=0 Sep 30 17:23:25 crc kubenswrapper[4818]: I0930 17:23:25.392614 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56967" event={"ID":"1577ac1d-bc53-4505-a4c0-9e64a16fca2b","Type":"ContainerDied","Data":"7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1"} Sep 30 17:23:26 crc kubenswrapper[4818]: I0930 17:23:26.292277 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:26 crc kubenswrapper[4818]: I0930 17:23:26.292610 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:26 crc kubenswrapper[4818]: I0930 17:23:26.342632 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tspg2" Sep 30 17:23:26 crc kubenswrapper[4818]: I0930 17:23:26.512887 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tspg2" 
Sep 30 17:23:27 crc kubenswrapper[4818]: I0930 17:23:27.406712 4818 generic.go:334] "Generic (PLEG): container finished" podID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerID="1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c" exitCode=0
Sep 30 17:23:27 crc kubenswrapper[4818]: I0930 17:23:27.407868 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56967" event={"ID":"1577ac1d-bc53-4505-a4c0-9e64a16fca2b","Type":"ContainerDied","Data":"1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c"}
Sep 30 17:23:27 crc kubenswrapper[4818]: I0930 17:23:27.707437 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tspg2"]
Sep 30 17:23:28 crc kubenswrapper[4818]: I0930 17:23:28.418301 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56967" event={"ID":"1577ac1d-bc53-4505-a4c0-9e64a16fca2b","Type":"ContainerStarted","Data":"3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf"}
Sep 30 17:23:28 crc kubenswrapper[4818]: I0930 17:23:28.418468 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tspg2" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="registry-server" containerID="cri-o://04a7abfa7afd9d74dc59368a3af64149140e061912f167407d3db7ccde19cff2" gracePeriod=2
Sep 30 17:23:28 crc kubenswrapper[4818]: I0930 17:23:28.443060 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-56967" podStartSLOduration=2.8412382149999997 podStartE2EDuration="5.443036592s" podCreationTimestamp="2025-09-30 17:23:23 +0000 UTC" firstStartedPulling="2025-09-30 17:23:25.394115349 +0000 UTC m=+1452.148387205" lastFinishedPulling="2025-09-30 17:23:27.995913766 +0000 UTC m=+1454.750185582" observedRunningTime="2025-09-30 17:23:28.440577666 +0000 UTC m=+1455.194849492" watchObservedRunningTime="2025-09-30 17:23:28.443036592 +0000 UTC m=+1455.197308438"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.435408 4818 generic.go:334] "Generic (PLEG): container finished" podID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerID="04a7abfa7afd9d74dc59368a3af64149140e061912f167407d3db7ccde19cff2" exitCode=0
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.435509 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tspg2" event={"ID":"ab36dfb2-948b-415b-8aa4-f6a80e40b22c","Type":"ContainerDied","Data":"04a7abfa7afd9d74dc59368a3af64149140e061912f167407d3db7ccde19cff2"}
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.435908 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tspg2" event={"ID":"ab36dfb2-948b-415b-8aa4-f6a80e40b22c","Type":"ContainerDied","Data":"7c3946943eae44abcf3dbce802313de3d21e9e8060dbabd7d8bc57929db4becb"}
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.435951 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c3946943eae44abcf3dbce802313de3d21e9e8060dbabd7d8bc57929db4becb"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.490679 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tspg2"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.553136 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-catalog-content\") pod \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") "
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.553413 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-utilities\") pod \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") "
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.553643 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvb7j\" (UniqueName: \"kubernetes.io/projected/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-kube-api-access-nvb7j\") pod \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\" (UID: \"ab36dfb2-948b-415b-8aa4-f6a80e40b22c\") "
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.554509 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-utilities" (OuterVolumeSpecName: "utilities") pod "ab36dfb2-948b-415b-8aa4-f6a80e40b22c" (UID: "ab36dfb2-948b-415b-8aa4-f6a80e40b22c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.555637 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-utilities\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.564467 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-kube-api-access-nvb7j" (OuterVolumeSpecName: "kube-api-access-nvb7j") pod "ab36dfb2-948b-415b-8aa4-f6a80e40b22c" (UID: "ab36dfb2-948b-415b-8aa4-f6a80e40b22c"). InnerVolumeSpecName "kube-api-access-nvb7j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.580462 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab36dfb2-948b-415b-8aa4-f6a80e40b22c" (UID: "ab36dfb2-948b-415b-8aa4-f6a80e40b22c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.657323 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvb7j\" (UniqueName: \"kubernetes.io/projected/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-kube-api-access-nvb7j\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.657357 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab36dfb2-948b-415b-8aa4-f6a80e40b22c-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.736165 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-07c2-account-create-xrzbk"]
Sep 30 17:23:29 crc kubenswrapper[4818]: E0930 17:23:29.736536 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="registry-server"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.736568 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="registry-server"
Sep 30 17:23:29 crc kubenswrapper[4818]: E0930 17:23:29.736586 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="extract-utilities"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.736594 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="extract-utilities"
Sep 30 17:23:29 crc kubenswrapper[4818]: E0930 17:23:29.736607 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="extract-content"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.736614 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="extract-content"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.736799 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" containerName="registry-server"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.737375 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.739731 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.746132 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-07c2-account-create-xrzbk"]
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.758914 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbxf8\" (UniqueName: \"kubernetes.io/projected/2012bb72-3bb7-4725-b0c7-4e8a6c27a176-kube-api-access-tbxf8\") pod \"watcher-07c2-account-create-xrzbk\" (UID: \"2012bb72-3bb7-4725-b0c7-4e8a6c27a176\") " pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.860348 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbxf8\" (UniqueName: \"kubernetes.io/projected/2012bb72-3bb7-4725-b0c7-4e8a6c27a176-kube-api-access-tbxf8\") pod \"watcher-07c2-account-create-xrzbk\" (UID: \"2012bb72-3bb7-4725-b0c7-4e8a6c27a176\") " pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk"
Sep 30 17:23:29 crc kubenswrapper[4818]: I0930 17:23:29.892176 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbxf8\" (UniqueName: \"kubernetes.io/projected/2012bb72-3bb7-4725-b0c7-4e8a6c27a176-kube-api-access-tbxf8\") pod \"watcher-07c2-account-create-xrzbk\" (UID: \"2012bb72-3bb7-4725-b0c7-4e8a6c27a176\") " pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk"
Sep 30 17:23:30 crc kubenswrapper[4818]: I0930 17:23:30.062877 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk"
Sep 30 17:23:30 crc kubenswrapper[4818]: I0930 17:23:30.443430 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tspg2"
Sep 30 17:23:30 crc kubenswrapper[4818]: I0930 17:23:30.462445 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tspg2"]
Sep 30 17:23:30 crc kubenswrapper[4818]: I0930 17:23:30.470253 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tspg2"]
Sep 30 17:23:30 crc kubenswrapper[4818]: I0930 17:23:30.546474 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-07c2-account-create-xrzbk"]
Sep 30 17:23:31 crc kubenswrapper[4818]: I0930 17:23:31.460669 4818 generic.go:334] "Generic (PLEG): container finished" podID="2012bb72-3bb7-4725-b0c7-4e8a6c27a176" containerID="2b71b8c7c0932ed04e3e19f535583209888aef2dd13c8d3a7d174f4aee786c6e" exitCode=0
Sep 30 17:23:31 crc kubenswrapper[4818]: I0930 17:23:31.461318 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk" event={"ID":"2012bb72-3bb7-4725-b0c7-4e8a6c27a176","Type":"ContainerDied","Data":"2b71b8c7c0932ed04e3e19f535583209888aef2dd13c8d3a7d174f4aee786c6e"}
Sep 30 17:23:31 crc kubenswrapper[4818]: I0930 17:23:31.461359 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk" event={"ID":"2012bb72-3bb7-4725-b0c7-4e8a6c27a176","Type":"ContainerStarted","Data":"e952ed78afb665d9b68dc3473cea927d4df4c003ecc3d95b17f3788cbb110cfa"}
Sep 30 17:23:32 crc kubenswrapper[4818]: I0930 17:23:32.035298 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab36dfb2-948b-415b-8aa4-f6a80e40b22c" path="/var/lib/kubelet/pods/ab36dfb2-948b-415b-8aa4-f6a80e40b22c/volumes"
Sep 30 17:23:32 crc kubenswrapper[4818]: I0930 17:23:32.877248 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk"
Sep 30 17:23:32 crc kubenswrapper[4818]: I0930 17:23:32.917294 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbxf8\" (UniqueName: \"kubernetes.io/projected/2012bb72-3bb7-4725-b0c7-4e8a6c27a176-kube-api-access-tbxf8\") pod \"2012bb72-3bb7-4725-b0c7-4e8a6c27a176\" (UID: \"2012bb72-3bb7-4725-b0c7-4e8a6c27a176\") "
Sep 30 17:23:32 crc kubenswrapper[4818]: I0930 17:23:32.922270 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2012bb72-3bb7-4725-b0c7-4e8a6c27a176-kube-api-access-tbxf8" (OuterVolumeSpecName: "kube-api-access-tbxf8") pod "2012bb72-3bb7-4725-b0c7-4e8a6c27a176" (UID: "2012bb72-3bb7-4725-b0c7-4e8a6c27a176"). InnerVolumeSpecName "kube-api-access-tbxf8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:33 crc kubenswrapper[4818]: I0930 17:23:33.019200 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbxf8\" (UniqueName: \"kubernetes.io/projected/2012bb72-3bb7-4725-b0c7-4e8a6c27a176-kube-api-access-tbxf8\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:33 crc kubenswrapper[4818]: I0930 17:23:33.481450 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk" event={"ID":"2012bb72-3bb7-4725-b0c7-4e8a6c27a176","Type":"ContainerDied","Data":"e952ed78afb665d9b68dc3473cea927d4df4c003ecc3d95b17f3788cbb110cfa"}
Sep 30 17:23:33 crc kubenswrapper[4818]: I0930 17:23:33.481496 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-07c2-account-create-xrzbk"
Sep 30 17:23:33 crc kubenswrapper[4818]: I0930 17:23:33.481516 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e952ed78afb665d9b68dc3473cea927d4df4c003ecc3d95b17f3788cbb110cfa"
Sep 30 17:23:33 crc kubenswrapper[4818]: I0930 17:23:33.743314 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-56967"
Sep 30 17:23:33 crc kubenswrapper[4818]: I0930 17:23:33.743395 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-56967"
Sep 30 17:23:33 crc kubenswrapper[4818]: I0930 17:23:33.813061 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-56967"
Sep 30 17:23:34 crc kubenswrapper[4818]: I0930 17:23:34.548816 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-56967"
Sep 30 17:23:34 crc kubenswrapper[4818]: I0930 17:23:34.989761 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"]
Sep 30 17:23:34 crc kubenswrapper[4818]: E0930 17:23:34.990151 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2012bb72-3bb7-4725-b0c7-4e8a6c27a176" containerName="mariadb-account-create"
Sep 30 17:23:34 crc kubenswrapper[4818]: I0930 17:23:34.990172 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="2012bb72-3bb7-4725-b0c7-4e8a6c27a176" containerName="mariadb-account-create"
Sep 30 17:23:34 crc kubenswrapper[4818]: I0930 17:23:34.990395 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="2012bb72-3bb7-4725-b0c7-4e8a6c27a176" containerName="mariadb-account-create"
Sep 30 17:23:34 crc kubenswrapper[4818]: I0930 17:23:34.990962 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:34 crc kubenswrapper[4818]: I0930 17:23:34.992667 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-qlqdg"
Sep 30 17:23:34 crc kubenswrapper[4818]: I0930 17:23:34.992882 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.005233 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"]
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.049614 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.049762 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-config-data\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.049804 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-db-sync-config-data\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.049861 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nvfk\" (UniqueName: \"kubernetes.io/projected/7d0db22d-0ea6-41fe-a00e-56a39f206da4-kube-api-access-5nvfk\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.151330 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-config-data\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.151394 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-db-sync-config-data\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.151456 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nvfk\" (UniqueName: \"kubernetes.io/projected/7d0db22d-0ea6-41fe-a00e-56a39f206da4-kube-api-access-5nvfk\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.151542 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.157207 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-config-data\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.157374 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-db-sync-config-data\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.160472 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.176379 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nvfk\" (UniqueName: \"kubernetes.io/projected/7d0db22d-0ea6-41fe-a00e-56a39f206da4-kube-api-access-5nvfk\") pod \"watcher-kuttl-db-sync-gft8b\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.355416 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:35 crc kubenswrapper[4818]: I0930 17:23:35.795572 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"]
Sep 30 17:23:35 crc kubenswrapper[4818]: W0930 17:23:35.799799 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d0db22d_0ea6_41fe_a00e_56a39f206da4.slice/crio-7de915c2b4e7b64f9b578fd3202a0c2e930e612582d621d520ed80636fbcfcad WatchSource:0}: Error finding container 7de915c2b4e7b64f9b578fd3202a0c2e930e612582d621d520ed80636fbcfcad: Status 404 returned error can't find the container with id 7de915c2b4e7b64f9b578fd3202a0c2e930e612582d621d520ed80636fbcfcad
Sep 30 17:23:36 crc kubenswrapper[4818]: I0930 17:23:36.099604 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-56967"]
Sep 30 17:23:36 crc kubenswrapper[4818]: I0930 17:23:36.515901 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b" event={"ID":"7d0db22d-0ea6-41fe-a00e-56a39f206da4","Type":"ContainerStarted","Data":"24be82590247ba81f18514ac126b9d64df72666b58ca865e1441f1d433c6ff25"}
Sep 30 17:23:36 crc kubenswrapper[4818]: I0930 17:23:36.516999 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b" event={"ID":"7d0db22d-0ea6-41fe-a00e-56a39f206da4","Type":"ContainerStarted","Data":"7de915c2b4e7b64f9b578fd3202a0c2e930e612582d621d520ed80636fbcfcad"}
Sep 30 17:23:36 crc kubenswrapper[4818]: I0930 17:23:36.516911 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-56967" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="registry-server" containerID="cri-o://3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf" gracePeriod=2
Sep 30 17:23:36 crc kubenswrapper[4818]: I0930 17:23:36.536795 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b" podStartSLOduration=2.5367535390000002 podStartE2EDuration="2.536753539s" podCreationTimestamp="2025-09-30 17:23:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:23:36.530688275 +0000 UTC m=+1463.284960101" watchObservedRunningTime="2025-09-30 17:23:36.536753539 +0000 UTC m=+1463.291025355"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.066110 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-56967"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.184728 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-catalog-content\") pod \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") "
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.185029 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5jws\" (UniqueName: \"kubernetes.io/projected/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-kube-api-access-d5jws\") pod \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") "
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.185221 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-utilities\") pod \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\" (UID: \"1577ac1d-bc53-4505-a4c0-9e64a16fca2b\") "
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.187118 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-utilities" (OuterVolumeSpecName: "utilities") pod "1577ac1d-bc53-4505-a4c0-9e64a16fca2b" (UID: "1577ac1d-bc53-4505-a4c0-9e64a16fca2b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.191002 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-kube-api-access-d5jws" (OuterVolumeSpecName: "kube-api-access-d5jws") pod "1577ac1d-bc53-4505-a4c0-9e64a16fca2b" (UID: "1577ac1d-bc53-4505-a4c0-9e64a16fca2b"). InnerVolumeSpecName "kube-api-access-d5jws". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.245136 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1577ac1d-bc53-4505-a4c0-9e64a16fca2b" (UID: "1577ac1d-bc53-4505-a4c0-9e64a16fca2b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.287862 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5jws\" (UniqueName: \"kubernetes.io/projected/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-kube-api-access-d5jws\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.287903 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-utilities\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.287916 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1577ac1d-bc53-4505-a4c0-9e64a16fca2b-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.526370 4818 generic.go:334] "Generic (PLEG): container finished" podID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerID="3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf" exitCode=0
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.526454 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-56967"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.526832 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56967" event={"ID":"1577ac1d-bc53-4505-a4c0-9e64a16fca2b","Type":"ContainerDied","Data":"3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf"}
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.527071 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56967" event={"ID":"1577ac1d-bc53-4505-a4c0-9e64a16fca2b","Type":"ContainerDied","Data":"db4d7f7113311663e08727dfa2bf13bba638789a8e4fe4466a16e68695fc1a5c"}
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.527236 4818 scope.go:117] "RemoveContainer" containerID="3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.556117 4818 scope.go:117] "RemoveContainer" containerID="1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.570874 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-56967"]
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.578744 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-56967"]
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.590735 4818 scope.go:117] "RemoveContainer" containerID="7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.619970 4818 scope.go:117] "RemoveContainer" containerID="3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf"
Sep 30 17:23:37 crc kubenswrapper[4818]: E0930 17:23:37.620457 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf\": container with ID starting with 3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf not found: ID does not exist" containerID="3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.620506 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf"} err="failed to get container status \"3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf\": rpc error: code = NotFound desc = could not find container \"3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf\": container with ID starting with 3fc7a1795b9a95161b6548f2f3e98b7f31f60fd2e692b707ba54a2326ad19aaf not found: ID does not exist"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.620532 4818 scope.go:117] "RemoveContainer" containerID="1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c"
Sep 30 17:23:37 crc kubenswrapper[4818]: E0930 17:23:37.620806 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c\": container with ID starting with 1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c not found: ID does not exist" containerID="1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.620829 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c"} err="failed to get container status \"1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c\": rpc error: code = NotFound desc = could not find container \"1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c\": container with ID starting with 1dba983f91f3438406cb435b6939e23589a6bf675d93a9f9ca8b3ca97e6ce08c not found: ID does not exist"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.620842 4818 scope.go:117] "RemoveContainer" containerID="7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1"
Sep 30 17:23:37 crc kubenswrapper[4818]: E0930 17:23:37.621097 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1\": container with ID starting with 7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1 not found: ID does not exist" containerID="7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1"
Sep 30 17:23:37 crc kubenswrapper[4818]: I0930 17:23:37.621114 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1"} err="failed to get container status \"7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1\": rpc error: code = NotFound desc = could not find container \"7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1\": container with ID starting with 7f4b87f25bc5e13a9c2d24e1b740a83ef7e5cf7aaa2c413bf4ebcafcd1a315d1 not found: ID does not exist"
Sep 30 17:23:38 crc kubenswrapper[4818]: I0930 17:23:38.034587 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" path="/var/lib/kubelet/pods/1577ac1d-bc53-4505-a4c0-9e64a16fca2b/volumes"
Sep 30 17:23:39 crc kubenswrapper[4818]: I0930 17:23:39.544964 4818 generic.go:334] "Generic (PLEG): container finished" podID="7d0db22d-0ea6-41fe-a00e-56a39f206da4" containerID="24be82590247ba81f18514ac126b9d64df72666b58ca865e1441f1d433c6ff25" exitCode=0
Sep 30 17:23:39 crc kubenswrapper[4818]: I0930 17:23:39.545058 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b" event={"ID":"7d0db22d-0ea6-41fe-a00e-56a39f206da4","Type":"ContainerDied","Data":"24be82590247ba81f18514ac126b9d64df72666b58ca865e1441f1d433c6ff25"}
Sep 30 17:23:40 crc kubenswrapper[4818]: I0930 17:23:40.933269 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.048177 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nvfk\" (UniqueName: \"kubernetes.io/projected/7d0db22d-0ea6-41fe-a00e-56a39f206da4-kube-api-access-5nvfk\") pod \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") "
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.049038 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-config-data\") pod \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") "
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.049071 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-db-sync-config-data\") pod \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") "
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.049155 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-combined-ca-bundle\") pod \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\" (UID: \"7d0db22d-0ea6-41fe-a00e-56a39f206da4\") "
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.054349 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "7d0db22d-0ea6-41fe-a00e-56a39f206da4" (UID: "7d0db22d-0ea6-41fe-a00e-56a39f206da4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.061078 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d0db22d-0ea6-41fe-a00e-56a39f206da4-kube-api-access-5nvfk" (OuterVolumeSpecName: "kube-api-access-5nvfk") pod "7d0db22d-0ea6-41fe-a00e-56a39f206da4" (UID: "7d0db22d-0ea6-41fe-a00e-56a39f206da4"). InnerVolumeSpecName "kube-api-access-5nvfk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.073076 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7d0db22d-0ea6-41fe-a00e-56a39f206da4" (UID: "7d0db22d-0ea6-41fe-a00e-56a39f206da4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.100630 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-config-data" (OuterVolumeSpecName: "config-data") pod "7d0db22d-0ea6-41fe-a00e-56a39f206da4" (UID: "7d0db22d-0ea6-41fe-a00e-56a39f206da4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.151575 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nvfk\" (UniqueName: \"kubernetes.io/projected/7d0db22d-0ea6-41fe-a00e-56a39f206da4-kube-api-access-5nvfk\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.151617 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.151629 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.151639 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d0db22d-0ea6-41fe-a00e-56a39f206da4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.565041 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b" event={"ID":"7d0db22d-0ea6-41fe-a00e-56a39f206da4","Type":"ContainerDied","Data":"7de915c2b4e7b64f9b578fd3202a0c2e930e612582d621d520ed80636fbcfcad"}
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.565081 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7de915c2b4e7b64f9b578fd3202a0c2e930e612582d621d520ed80636fbcfcad"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.565319 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.819713 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:23:41 crc kubenswrapper[4818]: E0930 17:23:41.820299 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="extract-content"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.820381 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="extract-content"
Sep 30 17:23:41 crc kubenswrapper[4818]: E0930 17:23:41.820442 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d0db22d-0ea6-41fe-a00e-56a39f206da4" containerName="watcher-kuttl-db-sync"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.820501 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d0db22d-0ea6-41fe-a00e-56a39f206da4" containerName="watcher-kuttl-db-sync"
Sep 30 17:23:41 crc kubenswrapper[4818]: E0930 17:23:41.820561 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="extract-utilities"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.820610 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="extract-utilities"
Sep 30 17:23:41 crc kubenswrapper[4818]: E0930 17:23:41.820681 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="registry-server"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.820736 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="registry-server"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.820914 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d0db22d-0ea6-41fe-a00e-56a39f206da4" containerName="watcher-kuttl-db-sync"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.821006 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1577ac1d-bc53-4505-a4c0-9e64a16fca2b" containerName="registry-server"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.827859 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.830138 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.830494 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-qlqdg"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.846057 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.847464 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.850127 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.854091 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.882805 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.912086 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.913366 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.915772 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.929959 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.962741 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fmgw\" (UniqueName: \"kubernetes.io/projected/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-kube-api-access-9fmgw\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.962805 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptnx6\" (UniqueName: \"kubernetes.io/projected/5d0e7f69-3396-40cc-8927-d9538019bb0e-kube-api-access-ptnx6\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.962830 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.962858 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.962877 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.962905 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d0e7f69-3396-40cc-8927-d9538019bb0e-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.962999 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.963024 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.963058 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.963087 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:41 crc kubenswrapper[4818]: I0930 17:23:41.963104 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064569 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fmgw\" (UniqueName: \"kubernetes.io/projected/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-kube-api-access-9fmgw\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064625 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064655 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptnx6\" (UniqueName: \"kubernetes.io/projected/5d0e7f69-3396-40cc-8927-d9538019bb0e-kube-api-access-ptnx6\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064678 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064752 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064825 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064864 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064971 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d0e7f69-3396-40cc-8927-d9538019bb0e-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.064996 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065045 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065133 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065156 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065185 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065257 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd4xp\" (UniqueName: \"kubernetes.io/projected/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-kube-api-access-pd4xp\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065308 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065330 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065355 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065482 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d0e7f69-3396-40cc-8927-d9538019bb0e-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.065582 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.073894 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.074036 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.074045 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.074238 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.074383 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.080505 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.085581 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptnx6\" (UniqueName: \"kubernetes.io/projected/5d0e7f69-3396-40cc-8927-d9538019bb0e-kube-api-access-ptnx6\") pod \"watcher-kuttl-applier-0\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.085693 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.088990 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fmgw\" (UniqueName: \"kubernetes.io/projected/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-kube-api-access-9fmgw\") pod \"watcher-kuttl-api-0\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.142848 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.166459 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.167182 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.167287 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.167611 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.168055 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.168094 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.168129 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd4xp\" (UniqueName: \"kubernetes.io/projected/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-kube-api-access-pd4xp\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.168213 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.170491 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.171725 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.172497 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.178329 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.186283 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd4xp\" (UniqueName: \"kubernetes.io/projected/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-kube-api-access-pd4xp\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.229432 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.634220 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.749536 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:23:42 crc kubenswrapper[4818]: I0930 17:23:42.824863 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:23:42 crc kubenswrapper[4818]: W0930 17:23:42.828274 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20d19e7b_7835_4ba3_9b20_cc7b0080b1ac.slice/crio-091238b6f77973496c1d8aac8c3d6e752bdd13fd42c97fc8088c915737a07441 WatchSource:0}: Error finding container 091238b6f77973496c1d8aac8c3d6e752bdd13fd42c97fc8088c915737a07441: Status 404 returned error can't find the container with id 091238b6f77973496c1d8aac8c3d6e752bdd13fd42c97fc8088c915737a07441 Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.580816 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"5d0e7f69-3396-40cc-8927-d9538019bb0e","Type":"ContainerStarted","Data":"9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b"} Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.581407 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"5d0e7f69-3396-40cc-8927-d9538019bb0e","Type":"ContainerStarted","Data":"ef5de5a432d269f458e67b950838991be3a60f47fd5a7f5200a542f4561f3eb7"} Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.583247 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
event={"ID":"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8","Type":"ContainerStarted","Data":"8781cd53abef7ae91f619e74e058d9e61486305b7d2dc8b58f751c4abeabe168"} Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.583278 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8","Type":"ContainerStarted","Data":"831b3884d96cc7c06ee697a478fccf983e6066b0450b92464b2875b159fea76a"} Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.583335 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8","Type":"ContainerStarted","Data":"7bb0edb16c29751916d9a012a41047da582a80c53e0a8eab37eb410bd85a66d9"} Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.583401 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.585370 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac","Type":"ContainerStarted","Data":"fb9a5e81cff43843a8e7c216f77acb36240a05078bc246e14d73f0c76f6a151e"} Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.585403 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac","Type":"ContainerStarted","Data":"091238b6f77973496c1d8aac8c3d6e752bdd13fd42c97fc8088c915737a07441"} Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.601293 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.601275615 podStartE2EDuration="2.601275615s" podCreationTimestamp="2025-09-30 17:23:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:23:43.600239757 +0000 UTC m=+1470.354511563" watchObservedRunningTime="2025-09-30 17:23:43.601275615 +0000 UTC m=+1470.355547431" Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.639326 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.639301103 podStartE2EDuration="2.639301103s" podCreationTimestamp="2025-09-30 17:23:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:23:43.621367359 +0000 UTC m=+1470.375639175" watchObservedRunningTime="2025-09-30 17:23:43.639301103 +0000 UTC m=+1470.393572919" Sep 30 17:23:43 crc kubenswrapper[4818]: I0930 17:23:43.648863 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.648845621 podStartE2EDuration="2.648845621s" podCreationTimestamp="2025-09-30 17:23:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:23:43.64619191 +0000 UTC m=+1470.400463726" watchObservedRunningTime="2025-09-30 17:23:43.648845621 +0000 UTC m=+1470.403117427" Sep 30 17:23:45 crc kubenswrapper[4818]: I0930 17:23:45.890063 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 
17:23:45 crc kubenswrapper[4818]: I0930 17:23:45.911243 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5hrmw"] Sep 30 17:23:45 crc kubenswrapper[4818]: I0930 17:23:45.913227 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:45 crc kubenswrapper[4818]: I0930 17:23:45.934057 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5hrmw"] Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.039903 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-297gk\" (UniqueName: \"kubernetes.io/projected/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-kube-api-access-297gk\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.039972 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-catalog-content\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.040093 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-utilities\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.141800 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-297gk\" (UniqueName: \"kubernetes.io/projected/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-kube-api-access-297gk\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.141859 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-catalog-content\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.142023 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-utilities\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.142301 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-catalog-content\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.142831 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-utilities\") pod 
\"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.161426 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-297gk\" (UniqueName: \"kubernetes.io/projected/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-kube-api-access-297gk\") pod \"redhat-operators-5hrmw\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.243966 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:46 crc kubenswrapper[4818]: I0930 17:23:46.718623 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5hrmw"] Sep 30 17:23:46 crc kubenswrapper[4818]: W0930 17:23:46.726892 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabc9c693_7fcf_407d_a7fa_0cc2b3150bc1.slice/crio-4856eac6d0835a13e86b36797775106b31831f6c670053df634b64497176fdaf WatchSource:0}: Error finding container 4856eac6d0835a13e86b36797775106b31831f6c670053df634b64497176fdaf: Status 404 returned error can't find the container with id 4856eac6d0835a13e86b36797775106b31831f6c670053df634b64497176fdaf Sep 30 17:23:47 crc kubenswrapper[4818]: I0930 17:23:47.143820 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:47 crc kubenswrapper[4818]: I0930 17:23:47.166833 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:47 crc kubenswrapper[4818]: I0930 17:23:47.621006 4818 generic.go:334] "Generic (PLEG): container finished" podID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerID="bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8" exitCode=0 Sep 30 17:23:47 crc kubenswrapper[4818]: I0930 17:23:47.621165 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5hrmw" event={"ID":"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1","Type":"ContainerDied","Data":"bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8"} Sep 30 17:23:47 crc kubenswrapper[4818]: I0930 17:23:47.621538 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5hrmw" event={"ID":"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1","Type":"ContainerStarted","Data":"4856eac6d0835a13e86b36797775106b31831f6c670053df634b64497176fdaf"} Sep 30 17:23:48 crc kubenswrapper[4818]: I0930 17:23:48.670270 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:49 crc kubenswrapper[4818]: I0930 17:23:49.661557 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5hrmw" event={"ID":"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1","Type":"ContainerStarted","Data":"b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41"} Sep 30 17:23:50 crc kubenswrapper[4818]: I0930 17:23:50.677563 4818 generic.go:334] "Generic (PLEG): container finished" podID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerID="b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41" exitCode=0 Sep 30 17:23:50 crc kubenswrapper[4818]: I0930 17:23:50.677627 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-5hrmw" event={"ID":"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1","Type":"ContainerDied","Data":"b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41"} Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.144110 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.148916 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.167530 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.207567 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.229889 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.254885 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.595806 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.595902 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.695161 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.700127 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.725069 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:23:52 crc kubenswrapper[4818]: I0930 17:23:52.725350 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:23:54 crc kubenswrapper[4818]: I0930 17:23:54.713176 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5hrmw" event={"ID":"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1","Type":"ContainerStarted","Data":"c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd"} Sep 30 17:23:54 crc kubenswrapper[4818]: I0930 17:23:54.730364 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5hrmw" podStartSLOduration=3.9997435279999998 podStartE2EDuration="9.730342448s" podCreationTimestamp="2025-09-30 17:23:45 +0000 UTC" firstStartedPulling="2025-09-30 17:23:47.623828226 +0000 UTC 
m=+1474.378100042" lastFinishedPulling="2025-09-30 17:23:53.354427146 +0000 UTC m=+1480.108698962" observedRunningTime="2025-09-30 17:23:54.727407169 +0000 UTC m=+1481.481679005" watchObservedRunningTime="2025-09-30 17:23:54.730342448 +0000 UTC m=+1481.484614274" Sep 30 17:23:55 crc kubenswrapper[4818]: I0930 17:23:55.773123 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:23:55 crc kubenswrapper[4818]: I0930 17:23:55.773705 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-central-agent" containerID="cri-o://2b196f6dd49e3fc7f4e924af70e11abd1ba8467efbe68230658a84c2a7e63042" gracePeriod=30 Sep 30 17:23:55 crc kubenswrapper[4818]: I0930 17:23:55.774124 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="proxy-httpd" containerID="cri-o://c677e548b2ae99b033af80d3b3c4ba0530ec48d94b6729e401c10604f452db27" gracePeriod=30 Sep 30 17:23:55 crc kubenswrapper[4818]: I0930 17:23:55.774136 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="sg-core" containerID="cri-o://3bbfc08ad1afcac147976aa3ccd8b1989088413246d3ba8aced67515a3882efc" gracePeriod=30 Sep 30 17:23:55 crc kubenswrapper[4818]: I0930 17:23:55.774154 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-notification-agent" containerID="cri-o://c5f33e27139ecfa8fa90423cf96a2dc5ef8e86a85df7733dbbbdbe455ed4e507" gracePeriod=30 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.245401 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.245437 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.405165 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.411007 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-gft8b"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.507883 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.508088 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="5d0e7f69-3396-40cc-8927-d9538019bb0e" containerName="watcher-applier" containerID="cri-o://9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b" gracePeriod=30 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.534544 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher07c2-account-delete-2hm76"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.535585 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.558568 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher07c2-account-delete-2hm76"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.613557 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-kxfgv"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.628013 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-kxfgv"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.639811 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z65t\" (UniqueName: \"kubernetes.io/projected/40e23c9b-60d3-40fd-96c0-35bff161aa1e-kube-api-access-4z65t\") pod \"watcher07c2-account-delete-2hm76\" (UID: \"40e23c9b-60d3-40fd-96c0-35bff161aa1e\") " pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.648179 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher07c2-account-delete-2hm76"] Sep 30 17:23:56 crc kubenswrapper[4818]: E0930 17:23:56.649010 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-4z65t], unattached volumes=[], failed to process volumes=[]: context canceled" pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" podUID="40e23c9b-60d3-40fd-96c0-35bff161aa1e" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.685828 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-07c2-account-create-xrzbk"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.721981 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-07c2-account-create-xrzbk"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.722046 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.722234 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" containerName="watcher-decision-engine" containerID="cri-o://fb9a5e81cff43843a8e7c216f77acb36240a05078bc246e14d73f0c76f6a151e" gracePeriod=30 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.727639 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.728044 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-kuttl-api-log" containerID="cri-o://831b3884d96cc7c06ee697a478fccf983e6066b0450b92464b2875b159fea76a" gracePeriod=30 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.728176 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-api" containerID="cri-o://8781cd53abef7ae91f619e74e058d9e61486305b7d2dc8b58f751c4abeabe168" gracePeriod=30 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.741207 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-4z65t\" (UniqueName: \"kubernetes.io/projected/40e23c9b-60d3-40fd-96c0-35bff161aa1e-kube-api-access-4z65t\") pod \"watcher07c2-account-delete-2hm76\" (UID: \"40e23c9b-60d3-40fd-96c0-35bff161aa1e\") " pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.783725 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z65t\" (UniqueName: \"kubernetes.io/projected/40e23c9b-60d3-40fd-96c0-35bff161aa1e-kube-api-access-4z65t\") pod \"watcher07c2-account-delete-2hm76\" (UID: \"40e23c9b-60d3-40fd-96c0-35bff161aa1e\") " pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.789098 4818 generic.go:334] "Generic (PLEG): container finished" podID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerID="c677e548b2ae99b033af80d3b3c4ba0530ec48d94b6729e401c10604f452db27" exitCode=0 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.789125 4818 generic.go:334] "Generic (PLEG): container finished" podID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerID="3bbfc08ad1afcac147976aa3ccd8b1989088413246d3ba8aced67515a3882efc" exitCode=2 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.789145 4818 generic.go:334] "Generic (PLEG): container finished" podID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerID="2b196f6dd49e3fc7f4e924af70e11abd1ba8467efbe68230658a84c2a7e63042" exitCode=0 Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.789188 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.789716 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerDied","Data":"c677e548b2ae99b033af80d3b3c4ba0530ec48d94b6729e401c10604f452db27"} Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.789741 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerDied","Data":"3bbfc08ad1afcac147976aa3ccd8b1989088413246d3ba8aced67515a3882efc"} Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.789753 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerDied","Data":"2b196f6dd49e3fc7f4e924af70e11abd1ba8467efbe68230658a84c2a7e63042"} Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.832360 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.943790 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4z65t\" (UniqueName: \"kubernetes.io/projected/40e23c9b-60d3-40fd-96c0-35bff161aa1e-kube-api-access-4z65t\") pod \"40e23c9b-60d3-40fd-96c0-35bff161aa1e\" (UID: \"40e23c9b-60d3-40fd-96c0-35bff161aa1e\") " Sep 30 17:23:56 crc kubenswrapper[4818]: I0930 17:23:56.948063 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40e23c9b-60d3-40fd-96c0-35bff161aa1e-kube-api-access-4z65t" (OuterVolumeSpecName: "kube-api-access-4z65t") pod "40e23c9b-60d3-40fd-96c0-35bff161aa1e" (UID: "40e23c9b-60d3-40fd-96c0-35bff161aa1e"). 
InnerVolumeSpecName "kube-api-access-4z65t". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.045420 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4z65t\" (UniqueName: \"kubernetes.io/projected/40e23c9b-60d3-40fd-96c0-35bff161aa1e-kube-api-access-4z65t\") on node \"crc\" DevicePath \"\"" Sep 30 17:23:57 crc kubenswrapper[4818]: E0930 17:23:57.171188 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:23:57 crc kubenswrapper[4818]: E0930 17:23:57.175099 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:23:57 crc kubenswrapper[4818]: E0930 17:23:57.176737 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:23:57 crc kubenswrapper[4818]: E0930 17:23:57.176870 4818 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="5d0e7f69-3396-40cc-8927-d9538019bb0e" containerName="watcher-applier" Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.299393 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5hrmw" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="registry-server" probeResult="failure" output=< Sep 30 17:23:57 crc kubenswrapper[4818]: timeout: failed to connect service ":50051" within 1s Sep 30 17:23:57 crc kubenswrapper[4818]: > Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.642453 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9322/\": read tcp 10.217.0.2:57234->10.217.0.173:9322: read: connection reset by peer" Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.642458 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.173:9322/\": read tcp 10.217.0.2:57218->10.217.0.173:9322: read: connection reset by peer" Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.807293 4818 generic.go:334] "Generic (PLEG): container finished" podID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerID="c5f33e27139ecfa8fa90423cf96a2dc5ef8e86a85df7733dbbbdbe455ed4e507" exitCode=0 Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.807347 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerDied","Data":"c5f33e27139ecfa8fa90423cf96a2dc5ef8e86a85df7733dbbbdbe455ed4e507"} Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.808678 4818 generic.go:334] "Generic (PLEG): container finished" podID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerID="8781cd53abef7ae91f619e74e058d9e61486305b7d2dc8b58f751c4abeabe168" exitCode=0 Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.808695 4818 generic.go:334] "Generic (PLEG): container finished" podID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerID="831b3884d96cc7c06ee697a478fccf983e6066b0450b92464b2875b159fea76a" exitCode=143 Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.808735 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher07c2-account-delete-2hm76" Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.810470 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8","Type":"ContainerDied","Data":"8781cd53abef7ae91f619e74e058d9e61486305b7d2dc8b58f751c4abeabe168"} Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.810543 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8","Type":"ContainerDied","Data":"831b3884d96cc7c06ee697a478fccf983e6066b0450b92464b2875b159fea76a"} Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.863508 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher07c2-account-delete-2hm76"] Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.872538 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher07c2-account-delete-2hm76"] Sep 30 17:23:57 crc kubenswrapper[4818]: I0930 17:23:57.982094 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.031449 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2012bb72-3bb7-4725-b0c7-4e8a6c27a176" path="/var/lib/kubelet/pods/2012bb72-3bb7-4725-b0c7-4e8a6c27a176/volumes" Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.032350 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40e23c9b-60d3-40fd-96c0-35bff161aa1e" path="/var/lib/kubelet/pods/40e23c9b-60d3-40fd-96c0-35bff161aa1e/volumes" Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.032669 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="509ba697-f02e-4e73-b634-ff5b48e58f77" path="/var/lib/kubelet/pods/509ba697-f02e-4e73-b634-ff5b48e58f77/volumes" Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.033169 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d0db22d-0ea6-41fe-a00e-56a39f206da4" path="/var/lib/kubelet/pods/7d0db22d-0ea6-41fe-a00e-56a39f206da4/volumes" Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.116969 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181293 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcvtr\" (UniqueName: \"kubernetes.io/projected/c22dc5f8-114e-4b46-9f08-4936a8972056-kube-api-access-hcvtr\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181369 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-combined-ca-bundle\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181453 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-config-data\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181508 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-run-httpd\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181573 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-ceilometer-tls-certs\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181614 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-sg-core-conf-yaml\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181638 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-log-httpd\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.181663 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-scripts\") pod \"c22dc5f8-114e-4b46-9f08-4936a8972056\" (UID: \"c22dc5f8-114e-4b46-9f08-4936a8972056\") " Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.182500 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.182852 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.195295 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c22dc5f8-114e-4b46-9f08-4936a8972056-kube-api-access-hcvtr" (OuterVolumeSpecName: "kube-api-access-hcvtr") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "kube-api-access-hcvtr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.200110 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-scripts" (OuterVolumeSpecName: "scripts") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.220916 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.243421 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.257350 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.282520 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-custom-prometheus-ca\") pod \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") "
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.282572 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-logs\") pod \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") "
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.282705 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-cert-memcached-mtls\") pod \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") "
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.282729 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fmgw\" (UniqueName: \"kubernetes.io/projected/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-kube-api-access-9fmgw\") pod \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") "
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.282799 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-config-data\") pod \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") "
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.282834 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-combined-ca-bundle\") pod \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\" (UID: \"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8\") "
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283252 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283275 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283288 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283301 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283312 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c22dc5f8-114e-4b46-9f08-4936a8972056-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283321 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283332 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcvtr\" (UniqueName: \"kubernetes.io/projected/c22dc5f8-114e-4b46-9f08-4936a8972056-kube-api-access-hcvtr\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.283889 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-logs" (OuterVolumeSpecName: "logs") pod "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" (UID: "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.288110 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-config-data" (OuterVolumeSpecName: "config-data") pod "c22dc5f8-114e-4b46-9f08-4936a8972056" (UID: "c22dc5f8-114e-4b46-9f08-4936a8972056"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.288116 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-kube-api-access-9fmgw" (OuterVolumeSpecName: "kube-api-access-9fmgw") pod "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" (UID: "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8"). InnerVolumeSpecName "kube-api-access-9fmgw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.303803 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" (UID: "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.309580 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" (UID: "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.333354 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-config-data" (OuterVolumeSpecName: "config-data") pod "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" (UID: "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.341883 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" (UID: "7a27bfb9-39c6-4874-b6f7-117a4a9d51d8"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.384514 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.384714 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-logs\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.384774 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.384865 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fmgw\" (UniqueName: \"kubernetes.io/projected/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-kube-api-access-9fmgw\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.384942 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.385009 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22dc5f8-114e-4b46-9f08-4936a8972056-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.385062 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.821584 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"7a27bfb9-39c6-4874-b6f7-117a4a9d51d8","Type":"ContainerDied","Data":"7bb0edb16c29751916d9a012a41047da582a80c53e0a8eab37eb410bd85a66d9"}
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.821637 4818 scope.go:117] "RemoveContainer" containerID="8781cd53abef7ae91f619e74e058d9e61486305b7d2dc8b58f751c4abeabe168"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.821758 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.832148 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c22dc5f8-114e-4b46-9f08-4936a8972056","Type":"ContainerDied","Data":"c53e01d6bcc03047bc5fca649cb6c6a465b88b9895a7e75e844f8991290598d8"}
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.832296 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.852373 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.853864 4818 scope.go:117] "RemoveContainer" containerID="831b3884d96cc7c06ee697a478fccf983e6066b0450b92464b2875b159fea76a"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.857652 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.870073 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.879732 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.884525 4818 scope.go:117] "RemoveContainer" containerID="c677e548b2ae99b033af80d3b3c4ba0530ec48d94b6729e401c10604f452db27"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.893998 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:58 crc kubenswrapper[4818]: E0930 17:23:58.894308 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-kuttl-api-log"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894326 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-kuttl-api-log"
Sep 30 17:23:58 crc kubenswrapper[4818]: E0930 17:23:58.894341 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="sg-core"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894347 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="sg-core"
Sep 30 17:23:58 crc kubenswrapper[4818]: E0930 17:23:58.894362 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-api"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894368 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-api"
Sep 30 17:23:58 crc kubenswrapper[4818]: E0930 17:23:58.894380 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-notification-agent"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894386 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-notification-agent"
Sep 30 17:23:58 crc kubenswrapper[4818]: E0930 17:23:58.894400 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="proxy-httpd"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894406 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="proxy-httpd"
Sep 30 17:23:58 crc kubenswrapper[4818]: E0930 17:23:58.894415 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-central-agent"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894420 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-central-agent"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894566 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="proxy-httpd"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894575 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-notification-agent"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894583 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-kuttl-api-log"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894589 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="sg-core"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894603 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" containerName="watcher-api"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.894616 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" containerName="ceilometer-central-agent"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.896274 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.899747 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.900242 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.900361 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.921094 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.941324 4818 scope.go:117] "RemoveContainer" containerID="3bbfc08ad1afcac147976aa3ccd8b1989088413246d3ba8aced67515a3882efc"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.964164 4818 scope.go:117] "RemoveContainer" containerID="c5f33e27139ecfa8fa90423cf96a2dc5ef8e86a85df7733dbbbdbe455ed4e507"
Sep 30 17:23:58 crc kubenswrapper[4818]: I0930 17:23:58.985207 4818 scope.go:117] "RemoveContainer" containerID="2b196f6dd49e3fc7f4e924af70e11abd1ba8467efbe68230658a84c2a7e63042"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.085564 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:23:59 crc kubenswrapper[4818]: E0930 17:23:59.086240 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceilometer-tls-certs combined-ca-bundle config-data kube-api-access-62plr log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="watcher-kuttl-default/ceilometer-0" podUID="0c701b3d-f0e9-456a-8f19-b05c97078499"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094270 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-run-httpd\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094363 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-config-data\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094385 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094432 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62plr\" (UniqueName: \"kubernetes.io/projected/0c701b3d-f0e9-456a-8f19-b05c97078499-kube-api-access-62plr\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094581 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-log-httpd\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094647 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094727 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.094904 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-scripts\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196202 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-scripts\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196298 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-run-httpd\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196343 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196357 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-config-data\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196386 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62plr\" (UniqueName: \"kubernetes.io/projected/0c701b3d-f0e9-456a-8f19-b05c97078499-kube-api-access-62plr\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196436 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-log-httpd\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196454 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.196474 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.197289 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-run-httpd\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.197300 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-log-httpd\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.200405 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.200455 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.201509 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.201606 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-scripts\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.210145 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-config-data\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.213116 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62plr\" (UniqueName: \"kubernetes.io/projected/0c701b3d-f0e9-456a-8f19-b05c97078499-kube-api-access-62plr\") pod \"ceilometer-0\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.858544 4818 generic.go:334] "Generic (PLEG): container finished" podID="5d0e7f69-3396-40cc-8927-d9538019bb0e" containerID="9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b" exitCode=0
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.858840 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"5d0e7f69-3396-40cc-8927-d9538019bb0e","Type":"ContainerDied","Data":"9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b"}
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.872278 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.899192 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:23:59 crc kubenswrapper[4818]: I0930 17:23:59.970977 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007395 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-config-data\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007460 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-run-httpd\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007488 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-log-httpd\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007525 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62plr\" (UniqueName: \"kubernetes.io/projected/0c701b3d-f0e9-456a-8f19-b05c97078499-kube-api-access-62plr\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007567 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-ceilometer-tls-certs\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007611 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-scripts\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007675 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-sg-core-conf-yaml\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.007715 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-combined-ca-bundle\") pod \"0c701b3d-f0e9-456a-8f19-b05c97078499\" (UID: \"0c701b3d-f0e9-456a-8f19-b05c97078499\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.008243 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.021202 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.023150 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.026257 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.026356 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-config-data" (OuterVolumeSpecName: "config-data") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.026550 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c701b3d-f0e9-456a-8f19-b05c97078499-kube-api-access-62plr" (OuterVolumeSpecName: "kube-api-access-62plr") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "kube-api-access-62plr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.026782 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.026860 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.026897 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c701b3d-f0e9-456a-8f19-b05c97078499-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.027117 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62plr\" (UniqueName: \"kubernetes.io/projected/0c701b3d-f0e9-456a-8f19-b05c97078499-kube-api-access-62plr\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.027139 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.030071 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.030184 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-scripts" (OuterVolumeSpecName: "scripts") pod "0c701b3d-f0e9-456a-8f19-b05c97078499" (UID: "0c701b3d-f0e9-456a-8f19-b05c97078499"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.061955 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a27bfb9-39c6-4874-b6f7-117a4a9d51d8" path="/var/lib/kubelet/pods/7a27bfb9-39c6-4874-b6f7-117a4a9d51d8/volumes"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.062702 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c22dc5f8-114e-4b46-9f08-4936a8972056" path="/var/lib/kubelet/pods/c22dc5f8-114e-4b46-9f08-4936a8972056/volumes"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.128658 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d0e7f69-3396-40cc-8927-d9538019bb0e-logs\") pod \"5d0e7f69-3396-40cc-8927-d9538019bb0e\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.128751 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-config-data\") pod \"5d0e7f69-3396-40cc-8927-d9538019bb0e\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.128814 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptnx6\" (UniqueName: \"kubernetes.io/projected/5d0e7f69-3396-40cc-8927-d9538019bb0e-kube-api-access-ptnx6\") pod \"5d0e7f69-3396-40cc-8927-d9538019bb0e\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.128859 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-combined-ca-bundle\") pod \"5d0e7f69-3396-40cc-8927-d9538019bb0e\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.128895 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-cert-memcached-mtls\") pod \"5d0e7f69-3396-40cc-8927-d9538019bb0e\" (UID: \"5d0e7f69-3396-40cc-8927-d9538019bb0e\") "
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.129510 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d0e7f69-3396-40cc-8927-d9538019bb0e-logs" (OuterVolumeSpecName: "logs") pod "5d0e7f69-3396-40cc-8927-d9538019bb0e" (UID: "5d0e7f69-3396-40cc-8927-d9538019bb0e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.130473 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.130495 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.130504 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c701b3d-f0e9-456a-8f19-b05c97078499-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.140528 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d0e7f69-3396-40cc-8927-d9538019bb0e-kube-api-access-ptnx6" (OuterVolumeSpecName: "kube-api-access-ptnx6") pod "5d0e7f69-3396-40cc-8927-d9538019bb0e" (UID: "5d0e7f69-3396-40cc-8927-d9538019bb0e"). InnerVolumeSpecName "kube-api-access-ptnx6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.157653 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d0e7f69-3396-40cc-8927-d9538019bb0e" (UID: "5d0e7f69-3396-40cc-8927-d9538019bb0e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.184912 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-config-data" (OuterVolumeSpecName: "config-data") pod "5d0e7f69-3396-40cc-8927-d9538019bb0e" (UID: "5d0e7f69-3396-40cc-8927-d9538019bb0e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.207533 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "5d0e7f69-3396-40cc-8927-d9538019bb0e" (UID: "5d0e7f69-3396-40cc-8927-d9538019bb0e"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.231979 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.232019 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptnx6\" (UniqueName: \"kubernetes.io/projected/5d0e7f69-3396-40cc-8927-d9538019bb0e-kube-api-access-ptnx6\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.232034 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.232047 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5d0e7f69-3396-40cc-8927-d9538019bb0e-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.232058 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d0e7f69-3396-40cc-8927-d9538019bb0e-logs\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.884288 4818 generic.go:334] "Generic (PLEG): container finished" podID="20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" containerID="fb9a5e81cff43843a8e7c216f77acb36240a05078bc246e14d73f0c76f6a151e" exitCode=0
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.884382 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac","Type":"ContainerDied","Data":"fb9a5e81cff43843a8e7c216f77acb36240a05078bc246e14d73f0c76f6a151e"}
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.887451 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"5d0e7f69-3396-40cc-8927-d9538019bb0e","Type":"ContainerDied","Data":"ef5de5a432d269f458e67b950838991be3a60f47fd5a7f5200a542f4561f3eb7"}
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.887480 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.887492 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.887510 4818 scope.go:117] "RemoveContainer" containerID="9d1f4ef586382d62353fdae22893dd9c08ef1229bd482c4822eae9116d26ce1b"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.949518 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.956939 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.965846 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.973790 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.983343 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:24:00 crc kubenswrapper[4818]: E0930 17:24:00.983986 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d0e7f69-3396-40cc-8927-d9538019bb0e" containerName="watcher-applier"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.984006 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d0e7f69-3396-40cc-8927-d9538019bb0e" containerName="watcher-applier"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.984259 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d0e7f69-3396-40cc-8927-d9538019bb0e" containerName="watcher-applier"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.985719 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.989028 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.989079 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Sep 30 17:24:00 crc kubenswrapper[4818]: I0930 17:24:00.989035 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.010533 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.145969 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-scripts\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.146061 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.146083 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-log-httpd\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.146168 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.146188 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.146207 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-run-httpd\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.146246 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzvwx\" (UniqueName: \"kubernetes.io/projected/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-kube-api-access-dzvwx\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.146265 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-config-data\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247406 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247465 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-log-httpd\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247553 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247570 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247609 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-run-httpd\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247643 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzvwx\" (UniqueName: \"kubernetes.io/projected/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-kube-api-access-dzvwx\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247659 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-config-data\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.247713 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-scripts\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.248044 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-log-httpd\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.248636 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-run-httpd\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.251590 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.251758 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.252625 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-config-data\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.252915 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.254879 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-scripts\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.263583 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzvwx\" (UniqueName: \"kubernetes.io/projected/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-kube-api-access-dzvwx\") pod \"ceilometer-0\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.310397 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.415347 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.551888 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-custom-prometheus-ca\") pod \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") "
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.551959 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-cert-memcached-mtls\") pod \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") "
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.551982 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-logs\") pod \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") "
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.552005 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd4xp\" (UniqueName: \"kubernetes.io/projected/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-kube-api-access-pd4xp\") pod \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") "
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.552030 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-combined-ca-bundle\") pod \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") "
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.552170 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-config-data\") pod \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\" (UID: \"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac\") "
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.552558 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-logs" (OuterVolumeSpecName: "logs") pod "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" (UID: "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.556279 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-kube-api-access-pd4xp" (OuterVolumeSpecName: "kube-api-access-pd4xp") pod "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" (UID: "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac"). InnerVolumeSpecName "kube-api-access-pd4xp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.578180 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" (UID: "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.585541 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" (UID: "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.591783 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-config-data" (OuterVolumeSpecName: "config-data") pod "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" (UID: "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.624216 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" (UID: "20d19e7b-7835-4ba3-9b20-cc7b0080b1ac"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.654107 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.654137 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.654147 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.654157 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-logs\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.654165 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd4xp\" (UniqueName: \"kubernetes.io/projected/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-kube-api-access-pd4xp\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.654172 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.801863 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:24:01 crc kubenswrapper[4818]: W0930 17:24:01.812212 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod115e3e75_6f62_41c2_80a9_b4b21d63b3ad.slice/crio-e812ba045a93062b46d45760ef60fe575a9a5f51a97dadaf8212f24df89887e6 WatchSource:0}: Error finding container e812ba045a93062b46d45760ef60fe575a9a5f51a97dadaf8212f24df89887e6: Status 404 returned error can't find the container with id e812ba045a93062b46d45760ef60fe575a9a5f51a97dadaf8212f24df89887e6
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.901408 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerStarted","Data":"e812ba045a93062b46d45760ef60fe575a9a5f51a97dadaf8212f24df89887e6"}
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.903314 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"20d19e7b-7835-4ba3-9b20-cc7b0080b1ac","Type":"ContainerDied","Data":"091238b6f77973496c1d8aac8c3d6e752bdd13fd42c97fc8088c915737a07441"}
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.903344 4818 scope.go:117] "RemoveContainer" containerID="fb9a5e81cff43843a8e7c216f77acb36240a05078bc246e14d73f0c76f6a151e"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.903460 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.972870 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:24:01 crc kubenswrapper[4818]: I0930 17:24:01.979346 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.041209 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c701b3d-f0e9-456a-8f19-b05c97078499" path="/var/lib/kubelet/pods/0c701b3d-f0e9-456a-8f19-b05c97078499/volumes"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.042179 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" path="/var/lib/kubelet/pods/20d19e7b-7835-4ba3-9b20-cc7b0080b1ac/volumes"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.044142 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d0e7f69-3396-40cc-8927-d9538019bb0e" path="/var/lib/kubelet/pods/5d0e7f69-3396-40cc-8927-d9538019bb0e/volumes"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.491990 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-tzwn7"]
Sep 30 17:24:02 crc kubenswrapper[4818]: E0930 17:24:02.492580 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" containerName="watcher-decision-engine"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.492596 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" containerName="watcher-decision-engine"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.492763 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="20d19e7b-7835-4ba3-9b20-cc7b0080b1ac" containerName="watcher-decision-engine"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.493535 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-tzwn7"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.504951 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-tzwn7"]
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.579031 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws6rh\" (UniqueName: \"kubernetes.io/projected/34f15efb-9be5-4a1c-9ab1-49ea8d5f828f-kube-api-access-ws6rh\") pod \"watcher-db-create-tzwn7\" (UID: \"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f\") " pod="watcher-kuttl-default/watcher-db-create-tzwn7"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.680348 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws6rh\" (UniqueName: \"kubernetes.io/projected/34f15efb-9be5-4a1c-9ab1-49ea8d5f828f-kube-api-access-ws6rh\") pod \"watcher-db-create-tzwn7\" (UID: \"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f\") " pod="watcher-kuttl-default/watcher-db-create-tzwn7"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.708671 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws6rh\" (UniqueName: \"kubernetes.io/projected/34f15efb-9be5-4a1c-9ab1-49ea8d5f828f-kube-api-access-ws6rh\") pod \"watcher-db-create-tzwn7\" (UID: \"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f\") " pod="watcher-kuttl-default/watcher-db-create-tzwn7"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.868344 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-tzwn7"
Sep 30 17:24:02 crc kubenswrapper[4818]: I0930 17:24:02.921187 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerStarted","Data":"ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c"}
Sep 30 17:24:03 crc kubenswrapper[4818]: I0930 17:24:03.383908 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-tzwn7"]
Sep 30 17:24:03 crc kubenswrapper[4818]: I0930 17:24:03.934408 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerStarted","Data":"a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096"}
Sep 30 17:24:03 crc kubenswrapper[4818]: I0930 17:24:03.936986 4818 generic.go:334] "Generic (PLEG): container finished" podID="34f15efb-9be5-4a1c-9ab1-49ea8d5f828f" containerID="f950aeccc9546a0bc347aa9b403f38db82e0ea3fb374796d2bc72d9ac4abff6f" exitCode=0
Sep 30 17:24:03 crc kubenswrapper[4818]: I0930 17:24:03.937021 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-tzwn7" event={"ID":"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f","Type":"ContainerDied","Data":"f950aeccc9546a0bc347aa9b403f38db82e0ea3fb374796d2bc72d9ac4abff6f"}
Sep 30 17:24:03 crc kubenswrapper[4818]: I0930 17:24:03.937044 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-tzwn7" event={"ID":"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f","Type":"ContainerStarted","Data":"017aee2fa7222a02769a39652631ba859b63ff51253b6a631f6b2dbc794f804f"}
Sep 30 17:24:04 crc kubenswrapper[4818]: I0930 17:24:04.956244 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0"
event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerStarted","Data":"bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d"} Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.344440 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-tzwn7" Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.529115 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws6rh\" (UniqueName: \"kubernetes.io/projected/34f15efb-9be5-4a1c-9ab1-49ea8d5f828f-kube-api-access-ws6rh\") pod \"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f\" (UID: \"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f\") " Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.532089 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34f15efb-9be5-4a1c-9ab1-49ea8d5f828f-kube-api-access-ws6rh" (OuterVolumeSpecName: "kube-api-access-ws6rh") pod "34f15efb-9be5-4a1c-9ab1-49ea8d5f828f" (UID: "34f15efb-9be5-4a1c-9ab1-49ea8d5f828f"). InnerVolumeSpecName "kube-api-access-ws6rh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.630652 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws6rh\" (UniqueName: \"kubernetes.io/projected/34f15efb-9be5-4a1c-9ab1-49ea8d5f828f-kube-api-access-ws6rh\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.966597 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-tzwn7" event={"ID":"34f15efb-9be5-4a1c-9ab1-49ea8d5f828f","Type":"ContainerDied","Data":"017aee2fa7222a02769a39652631ba859b63ff51253b6a631f6b2dbc794f804f"} Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.966851 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="017aee2fa7222a02769a39652631ba859b63ff51253b6a631f6b2dbc794f804f" Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.966905 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-tzwn7" Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.971593 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerStarted","Data":"90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91"} Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.972910 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:05 crc kubenswrapper[4818]: I0930 17:24:05.996400 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.440338546 podStartE2EDuration="5.996382107s" podCreationTimestamp="2025-09-30 17:24:00 +0000 UTC" firstStartedPulling="2025-09-30 17:24:01.815262936 +0000 UTC m=+1488.569534792" lastFinishedPulling="2025-09-30 17:24:05.371306537 +0000 UTC m=+1492.125578353" observedRunningTime="2025-09-30 17:24:05.995556164 +0000 UTC m=+1492.749828040" watchObservedRunningTime="2025-09-30 17:24:05.996382107 +0000 UTC m=+1492.750653923" Sep 30 17:24:07 crc kubenswrapper[4818]: I0930 17:24:07.286648 4818 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5hrmw" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="registry-server" probeResult="failure" output=< Sep 30 17:24:07 crc kubenswrapper[4818]: timeout: failed to connect service ":50051" within 1s Sep 30 17:24:07 crc kubenswrapper[4818]: > Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.506645 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-8330-account-create-lc5wj"] Sep 30 17:24:12 crc kubenswrapper[4818]: E0930 17:24:12.507347 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34f15efb-9be5-4a1c-9ab1-49ea8d5f828f" containerName="mariadb-database-create" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.507358 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="34f15efb-9be5-4a1c-9ab1-49ea8d5f828f" containerName="mariadb-database-create" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.507517 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="34f15efb-9be5-4a1c-9ab1-49ea8d5f828f" containerName="mariadb-database-create" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.508013 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.510723 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.522250 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-8330-account-create-lc5wj"] Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.557186 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh5np\" (UniqueName: \"kubernetes.io/projected/e57330cc-de35-4537-bc9e-db3ff0e443b0-kube-api-access-xh5np\") pod \"watcher-8330-account-create-lc5wj\" (UID: \"e57330cc-de35-4537-bc9e-db3ff0e443b0\") " pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.658912 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh5np\" (UniqueName: \"kubernetes.io/projected/e57330cc-de35-4537-bc9e-db3ff0e443b0-kube-api-access-xh5np\") pod \"watcher-8330-account-create-lc5wj\" (UID: \"e57330cc-de35-4537-bc9e-db3ff0e443b0\") " pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.687788 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh5np\" (UniqueName: \"kubernetes.io/projected/e57330cc-de35-4537-bc9e-db3ff0e443b0-kube-api-access-xh5np\") pod \"watcher-8330-account-create-lc5wj\" (UID: \"e57330cc-de35-4537-bc9e-db3ff0e443b0\") " pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" Sep 30 17:24:12 crc kubenswrapper[4818]: I0930 17:24:12.877201 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.367031 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-8330-account-create-lc5wj"] Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.710749 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vg65g"] Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.712437 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.727757 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vg65g"] Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.779782 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thwpk\" (UniqueName: \"kubernetes.io/projected/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-kube-api-access-thwpk\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.780287 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-catalog-content\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.780367 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-utilities\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.881951 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thwpk\" (UniqueName: \"kubernetes.io/projected/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-kube-api-access-thwpk\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.882007 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-catalog-content\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.882053 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-utilities\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.882634 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-utilities\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.882967 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-catalog-content\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:13 crc kubenswrapper[4818]: I0930 17:24:13.907185 4818 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-thwpk\" (UniqueName: \"kubernetes.io/projected/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-kube-api-access-thwpk\") pod \"certified-operators-vg65g\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:14 crc kubenswrapper[4818]: I0930 17:24:14.083336 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:14 crc kubenswrapper[4818]: I0930 17:24:14.120156 4818 generic.go:334] "Generic (PLEG): container finished" podID="e57330cc-de35-4537-bc9e-db3ff0e443b0" containerID="92ac88fca6a6683e081c4fbbe95b5cbe4bd2df3952ad53486eded918aa01233f" exitCode=0 Sep 30 17:24:14 crc kubenswrapper[4818]: I0930 17:24:14.120209 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" event={"ID":"e57330cc-de35-4537-bc9e-db3ff0e443b0","Type":"ContainerDied","Data":"92ac88fca6a6683e081c4fbbe95b5cbe4bd2df3952ad53486eded918aa01233f"} Sep 30 17:24:14 crc kubenswrapper[4818]: I0930 17:24:14.120240 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" event={"ID":"e57330cc-de35-4537-bc9e-db3ff0e443b0","Type":"ContainerStarted","Data":"9e89dc3d19ea4e1190de2aa6f7fec9e0f5bfe15ccf6ad5679cbdb73c21d5d85c"} Sep 30 17:24:14 crc kubenswrapper[4818]: I0930 17:24:14.603401 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vg65g"] Sep 30 17:24:14 crc kubenswrapper[4818]: W0930 17:24:14.607549 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfda8aee5_f56f_4875_8ef1_ce8858cdfeba.slice/crio-ca1dc6440b60bd55e24e45bead586f8f540139efeecfb8e6974210283ea86fbf WatchSource:0}: Error finding container ca1dc6440b60bd55e24e45bead586f8f540139efeecfb8e6974210283ea86fbf: Status 404 returned error can't find the container with id ca1dc6440b60bd55e24e45bead586f8f540139efeecfb8e6974210283ea86fbf Sep 30 17:24:15 crc kubenswrapper[4818]: I0930 17:24:15.132625 4818 generic.go:334] "Generic (PLEG): container finished" podID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerID="dbc822498410508327f04db177d74625cfcf30eff452704f9a0e6b4c88c0384c" exitCode=0 Sep 30 17:24:15 crc kubenswrapper[4818]: I0930 17:24:15.132683 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vg65g" event={"ID":"fda8aee5-f56f-4875-8ef1-ce8858cdfeba","Type":"ContainerDied","Data":"dbc822498410508327f04db177d74625cfcf30eff452704f9a0e6b4c88c0384c"} Sep 30 17:24:15 crc kubenswrapper[4818]: I0930 17:24:15.133052 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vg65g" event={"ID":"fda8aee5-f56f-4875-8ef1-ce8858cdfeba","Type":"ContainerStarted","Data":"ca1dc6440b60bd55e24e45bead586f8f540139efeecfb8e6974210283ea86fbf"} Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:15.538383 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:15.613291 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh5np\" (UniqueName: \"kubernetes.io/projected/e57330cc-de35-4537-bc9e-db3ff0e443b0-kube-api-access-xh5np\") pod \"e57330cc-de35-4537-bc9e-db3ff0e443b0\" (UID: \"e57330cc-de35-4537-bc9e-db3ff0e443b0\") " Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:15.620524 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e57330cc-de35-4537-bc9e-db3ff0e443b0-kube-api-access-xh5np" (OuterVolumeSpecName: "kube-api-access-xh5np") pod "e57330cc-de35-4537-bc9e-db3ff0e443b0" (UID: "e57330cc-de35-4537-bc9e-db3ff0e443b0"). InnerVolumeSpecName "kube-api-access-xh5np". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:15.715489 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xh5np\" (UniqueName: \"kubernetes.io/projected/e57330cc-de35-4537-bc9e-db3ff0e443b0-kube-api-access-xh5np\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:16.140610 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" event={"ID":"e57330cc-de35-4537-bc9e-db3ff0e443b0","Type":"ContainerDied","Data":"9e89dc3d19ea4e1190de2aa6f7fec9e0f5bfe15ccf6ad5679cbdb73c21d5d85c"} Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:16.140640 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e89dc3d19ea4e1190de2aa6f7fec9e0f5bfe15ccf6ad5679cbdb73c21d5d85c" Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:16.140673 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-8330-account-create-lc5wj" Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:16.297606 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:24:16 crc kubenswrapper[4818]: I0930 17:24:16.374984 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5hrmw" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.159232 4818 generic.go:334] "Generic (PLEG): container finished" podID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerID="7b08d65606af0ee74b552488b86b4be4afaea03991b7fa0d38a3827863f6c12c" exitCode=0 Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.159351 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vg65g" event={"ID":"fda8aee5-f56f-4875-8ef1-ce8858cdfeba","Type":"ContainerDied","Data":"7b08d65606af0ee74b552488b86b4be4afaea03991b7fa0d38a3827863f6c12c"} Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.762757 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"] Sep 30 17:24:17 crc kubenswrapper[4818]: E0930 17:24:17.763843 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e57330cc-de35-4537-bc9e-db3ff0e443b0" containerName="mariadb-account-create" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.763914 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e57330cc-de35-4537-bc9e-db3ff0e443b0" containerName="mariadb-account-create" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.764217 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="e57330cc-de35-4537-bc9e-db3ff0e443b0" containerName="mariadb-account-create" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.764898 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.772001 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-9sl6v" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.772707 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.778188 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"] Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.851507 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dmff\" (UniqueName: \"kubernetes.io/projected/918ea858-4ad5-46f8-a917-608a55a2a80b-kube-api-access-2dmff\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.851575 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.851657 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-config-data\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.851741 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-db-sync-config-data\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.952839 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-config-data\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.953738 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-db-sync-config-data\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.953791 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dmff\" (UniqueName: \"kubernetes.io/projected/918ea858-4ad5-46f8-a917-608a55a2a80b-kube-api-access-2dmff\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:17 crc 
Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.958954 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"
Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.959487 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-db-sync-config-data\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"
Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.960203 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-config-data\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"
Sep 30 17:24:17 crc kubenswrapper[4818]: I0930 17:24:17.984186 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dmff\" (UniqueName: \"kubernetes.io/projected/918ea858-4ad5-46f8-a917-608a55a2a80b-kube-api-access-2dmff\") pod \"watcher-kuttl-db-sync-cbbxs\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"
Sep 30 17:24:18 crc kubenswrapper[4818]: I0930 17:24:18.078759 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"
Sep 30 17:24:18 crc kubenswrapper[4818]: I0930 17:24:18.181886 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vg65g" event={"ID":"fda8aee5-f56f-4875-8ef1-ce8858cdfeba","Type":"ContainerStarted","Data":"af3a8905b0821470104da568b542524329d233712b2a994da0bb6aed9d9286f1"}
Sep 30 17:24:18 crc kubenswrapper[4818]: I0930 17:24:18.542581 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vg65g" podStartSLOduration=3.086006506 podStartE2EDuration="5.542563173s" podCreationTimestamp="2025-09-30 17:24:13 +0000 UTC" firstStartedPulling="2025-09-30 17:24:15.135126313 +0000 UTC m=+1501.889398159" lastFinishedPulling="2025-09-30 17:24:17.591683 +0000 UTC m=+1504.345954826" observedRunningTime="2025-09-30 17:24:18.207595721 +0000 UTC m=+1504.961867537" watchObservedRunningTime="2025-09-30 17:24:18.542563173 +0000 UTC m=+1505.296834989"
Sep 30 17:24:18 crc kubenswrapper[4818]: I0930 17:24:18.543130 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"]
Sep 30 17:24:18 crc kubenswrapper[4818]: W0930 17:24:18.551066 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod918ea858_4ad5_46f8_a917_608a55a2a80b.slice/crio-dd97903764a35907bf6578141aa2fba0d978f4be0fabf64cd3e83860e7ffbb9d WatchSource:0}: Error finding container dd97903764a35907bf6578141aa2fba0d978f4be0fabf64cd3e83860e7ffbb9d: Status 404 returned error can't find the container with id dd97903764a35907bf6578141aa2fba0d978f4be0fabf64cd3e83860e7ffbb9d
Sep 30 17:24:19 crc kubenswrapper[4818]: I0930 17:24:19.193642 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" event={"ID":"918ea858-4ad5-46f8-a917-608a55a2a80b","Type":"ContainerStarted","Data":"a7318b88d040aeb2352229ca638e298bbc9636d342d7b51903d9f6da2745c950"}
Sep 30 17:24:19 crc kubenswrapper[4818]: I0930 17:24:19.194020 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" event={"ID":"918ea858-4ad5-46f8-a917-608a55a2a80b","Type":"ContainerStarted","Data":"dd97903764a35907bf6578141aa2fba0d978f4be0fabf64cd3e83860e7ffbb9d"}
Sep 30 17:24:19 crc kubenswrapper[4818]: I0930 17:24:19.209037 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" podStartSLOduration=2.209014743 podStartE2EDuration="2.209014743s" podCreationTimestamp="2025-09-30 17:24:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:24:19.206058013 +0000 UTC m=+1505.960329829" watchObservedRunningTime="2025-09-30 17:24:19.209014743 +0000 UTC m=+1505.963286559"
Sep 30 17:24:20 crc kubenswrapper[4818]: I0930 17:24:20.703808 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5hrmw"]
Sep 30 17:24:20 crc kubenswrapper[4818]: I0930 17:24:20.705846 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5hrmw" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="registry-server" containerID="cri-o://c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd" gracePeriod=2
Sep 30 17:24:20 crc kubenswrapper[4818]: I0930 17:24:20.785193 4818 scope.go:117] "RemoveContainer" containerID="f4881e26b2431315c39ff35a5becad71a574c68d86198258c713e669051da2cb"
Sep 30 17:24:20 crc kubenswrapper[4818]: I0930 17:24:20.922402 4818 scope.go:117] "RemoveContainer" containerID="1155add45ca0967bd050f5c337f2fd6c9b1196c444bc5d6f67b8363a03795280"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.194792 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5hrmw"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.207524 4818 generic.go:334] "Generic (PLEG): container finished" podID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerID="c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd" exitCode=0
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.207588 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5hrmw" event={"ID":"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1","Type":"ContainerDied","Data":"c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd"}
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.207615 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5hrmw" event={"ID":"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1","Type":"ContainerDied","Data":"4856eac6d0835a13e86b36797775106b31831f6c670053df634b64497176fdaf"}
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.207638 4818 scope.go:117] "RemoveContainer" containerID="c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.207766 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5hrmw"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.209511 4818 generic.go:334] "Generic (PLEG): container finished" podID="918ea858-4ad5-46f8-a917-608a55a2a80b" containerID="a7318b88d040aeb2352229ca638e298bbc9636d342d7b51903d9f6da2745c950" exitCode=0
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.209546 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" event={"ID":"918ea858-4ad5-46f8-a917-608a55a2a80b","Type":"ContainerDied","Data":"a7318b88d040aeb2352229ca638e298bbc9636d342d7b51903d9f6da2745c950"}
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.227097 4818 scope.go:117] "RemoveContainer" containerID="b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.259365 4818 scope.go:117] "RemoveContainer" containerID="bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.279982 4818 scope.go:117] "RemoveContainer" containerID="c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd"
Sep 30 17:24:21 crc kubenswrapper[4818]: E0930 17:24:21.280370 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd\": container with ID starting with c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd not found: ID does not exist" containerID="c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.280406 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd"} err="failed to get container status \"c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd\": rpc error: code = NotFound desc = could not find container \"c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd\": container with ID starting with c55ede4a157352116e6d3e2ddd3e5c8ff445289b2e56ae81c55f04ce505caabd not found: ID does not exist"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.280432 4818 scope.go:117] "RemoveContainer" containerID="b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41"
Sep 30 17:24:21 crc kubenswrapper[4818]: E0930 17:24:21.280995 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41\": container with ID starting with b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41 not found: ID does not exist" containerID="b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.281028 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41"} err="failed to get container status \"b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41\": rpc error: code = NotFound desc = could not find container \"b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41\": container with ID starting with b6f452ab0afd10c3940892d6688aeb7961a036332c576b1ade04a292a7fa0e41 not found: ID does not exist"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.281043 4818 scope.go:117] "RemoveContainer" containerID="bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8"
Sep 30 17:24:21 crc kubenswrapper[4818]: E0930 17:24:21.281387 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8\": container with ID starting with bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8 not found: ID does not exist" containerID="bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.281405 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8"} err="failed to get container status \"bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8\": rpc error: code = NotFound desc = could not find container \"bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8\": container with ID starting with bf0c47971650be788146c36665964f8b0796e3fd61b3d4dd82fda93b05927cb8 not found: ID does not exist"
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.309354 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-297gk\" (UniqueName: \"kubernetes.io/projected/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-kube-api-access-297gk\") pod \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") "
Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.309433 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-utilities\") pod \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") "
pod \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.309548 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-catalog-content\") pod \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\" (UID: \"abc9c693-7fcf-407d-a7fa-0cc2b3150bc1\") " Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.313910 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-utilities" (OuterVolumeSpecName: "utilities") pod "abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" (UID: "abc9c693-7fcf-407d-a7fa-0cc2b3150bc1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.314490 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-kube-api-access-297gk" (OuterVolumeSpecName: "kube-api-access-297gk") pod "abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" (UID: "abc9c693-7fcf-407d-a7fa-0cc2b3150bc1"). InnerVolumeSpecName "kube-api-access-297gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.399658 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" (UID: "abc9c693-7fcf-407d-a7fa-0cc2b3150bc1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.411847 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.411891 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-297gk\" (UniqueName: \"kubernetes.io/projected/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-kube-api-access-297gk\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.411908 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.541662 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5hrmw"] Sep 30 17:24:21 crc kubenswrapper[4818]: I0930 17:24:21.547855 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5hrmw"] Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.032131 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" path="/var/lib/kubelet/pods/abc9c693-7fcf-407d-a7fa-0cc2b3150bc1/volumes" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.564824 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.595988 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.596047 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.596094 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.596739 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.596799 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" gracePeriod=600 Sep 30 17:24:22 crc kubenswrapper[4818]: E0930 17:24:22.717291 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.733581 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-combined-ca-bundle\") pod \"918ea858-4ad5-46f8-a917-608a55a2a80b\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.733669 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-config-data\") pod \"918ea858-4ad5-46f8-a917-608a55a2a80b\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.733844 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dmff\" (UniqueName: \"kubernetes.io/projected/918ea858-4ad5-46f8-a917-608a55a2a80b-kube-api-access-2dmff\") pod \"918ea858-4ad5-46f8-a917-608a55a2a80b\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.733981 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-db-sync-config-data\") pod \"918ea858-4ad5-46f8-a917-608a55a2a80b\" (UID: \"918ea858-4ad5-46f8-a917-608a55a2a80b\") " Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.739429 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/918ea858-4ad5-46f8-a917-608a55a2a80b-kube-api-access-2dmff" (OuterVolumeSpecName: "kube-api-access-2dmff") pod "918ea858-4ad5-46f8-a917-608a55a2a80b" (UID: "918ea858-4ad5-46f8-a917-608a55a2a80b"). InnerVolumeSpecName "kube-api-access-2dmff". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.740780 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "918ea858-4ad5-46f8-a917-608a55a2a80b" (UID: "918ea858-4ad5-46f8-a917-608a55a2a80b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.764174 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "918ea858-4ad5-46f8-a917-608a55a2a80b" (UID: "918ea858-4ad5-46f8-a917-608a55a2a80b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.785397 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-config-data" (OuterVolumeSpecName: "config-data") pod "918ea858-4ad5-46f8-a917-608a55a2a80b" (UID: "918ea858-4ad5-46f8-a917-608a55a2a80b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.835795 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dmff\" (UniqueName: \"kubernetes.io/projected/918ea858-4ad5-46f8-a917-608a55a2a80b-kube-api-access-2dmff\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.835830 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.835839 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:22 crc kubenswrapper[4818]: I0930 17:24:22.835848 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/918ea858-4ad5-46f8-a917-608a55a2a80b-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.233172 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" exitCode=0 Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.233255 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233"} Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.233330 4818 scope.go:117] "RemoveContainer" containerID="ae5094dfd804c3f512a41e1f23be19d77cd5136dc31ac2ab100aaebcb668c7b1" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.234032 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:24:23 crc kubenswrapper[4818]: E0930 17:24:23.234510 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.234769 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" event={"ID":"918ea858-4ad5-46f8-a917-608a55a2a80b","Type":"ContainerDied","Data":"dd97903764a35907bf6578141aa2fba0d978f4be0fabf64cd3e83860e7ffbb9d"} Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.234791 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd97903764a35907bf6578141aa2fba0d978f4be0fabf64cd3e83860e7ffbb9d" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.234796 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.492791 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:24:23 crc kubenswrapper[4818]: E0930 17:24:23.493492 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="918ea858-4ad5-46f8-a917-608a55a2a80b" containerName="watcher-kuttl-db-sync" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.493516 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="918ea858-4ad5-46f8-a917-608a55a2a80b" containerName="watcher-kuttl-db-sync" Sep 30 17:24:23 crc kubenswrapper[4818]: E0930 17:24:23.493545 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="registry-server" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.493553 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="registry-server" Sep 30 17:24:23 crc kubenswrapper[4818]: E0930 17:24:23.493567 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="extract-content" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.493575 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="extract-content" Sep 30 17:24:23 crc kubenswrapper[4818]: E0930 17:24:23.493592 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="extract-utilities" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.493602 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="extract-utilities" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.493813 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="abc9c693-7fcf-407d-a7fa-0cc2b3150bc1" containerName="registry-server" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.493836 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="918ea858-4ad5-46f8-a917-608a55a2a80b" containerName="watcher-kuttl-db-sync" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.494961 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.497589 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.497684 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-9sl6v" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.508868 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.535220 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.536340 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.538337 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.560759 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.598628 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.599974 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.601894 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.606469 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649371 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649429 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649452 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7178784-85af-469a-8b70-b6949f6580a4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649558 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649690 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5634d48-92cc-4cb1-9009-5bcd5ad54179-logs\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649723 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: 
\"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649764 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k45g8\" (UniqueName: \"kubernetes.io/projected/a7178784-85af-469a-8b70-b6949f6580a4-kube-api-access-k45g8\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649802 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649848 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649884 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptvsn\" (UniqueName: \"kubernetes.io/projected/f5634d48-92cc-4cb1-9009-5bcd5ad54179-kube-api-access-ptvsn\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.649980 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751436 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751512 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7178784-85af-469a-8b70-b6949f6580a4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751565 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751636 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: 
\"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751697 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751752 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5634d48-92cc-4cb1-9009-5bcd5ad54179-logs\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751785 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751831 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k45g8\" (UniqueName: \"kubernetes.io/projected/a7178784-85af-469a-8b70-b6949f6580a4-kube-api-access-k45g8\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751861 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6778e25-7694-4fc9-9f75-b28e21e39099-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751909 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.751977 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752033 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7178784-85af-469a-8b70-b6949f6580a4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752040 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-f5mn2\" (UniqueName: \"kubernetes.io/projected/f6778e25-7694-4fc9-9f75-b28e21e39099-kube-api-access-f5mn2\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752107 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752159 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752212 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptvsn\" (UniqueName: \"kubernetes.io/projected/f5634d48-92cc-4cb1-9009-5bcd5ad54179-kube-api-access-ptvsn\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752341 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752406 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.752557 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5634d48-92cc-4cb1-9009-5bcd5ad54179-logs\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.756192 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.756454 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.758264 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.758765 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.762294 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.764375 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.764741 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.775317 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptvsn\" (UniqueName: \"kubernetes.io/projected/f5634d48-92cc-4cb1-9009-5bcd5ad54179-kube-api-access-ptvsn\") pod \"watcher-kuttl-api-0\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.775823 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k45g8\" (UniqueName: \"kubernetes.io/projected/a7178784-85af-469a-8b70-b6949f6580a4-kube-api-access-k45g8\") pod \"watcher-kuttl-applier-0\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.815558 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.854874 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.856116 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.856157 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.856194 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6778e25-7694-4fc9-9f75-b28e21e39099-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.856220 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.856249 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5mn2\" (UniqueName: \"kubernetes.io/projected/f6778e25-7694-4fc9-9f75-b28e21e39099-kube-api-access-f5mn2\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.856268 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.857340 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6778e25-7694-4fc9-9f75-b28e21e39099-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.860526 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.862039 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-combined-ca-bundle\") pod 
\"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.862571 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.864080 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.886608 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5mn2\" (UniqueName: \"kubernetes.io/projected/f6778e25-7694-4fc9-9f75-b28e21e39099-kube-api-access-f5mn2\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:23 crc kubenswrapper[4818]: I0930 17:24:23.926999 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:24 crc kubenswrapper[4818]: I0930 17:24:24.084560 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:24 crc kubenswrapper[4818]: I0930 17:24:24.085050 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:24 crc kubenswrapper[4818]: I0930 17:24:24.150430 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:24 crc kubenswrapper[4818]: I0930 17:24:24.317869 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:24 crc kubenswrapper[4818]: I0930 17:24:24.328907 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:24:24 crc kubenswrapper[4818]: I0930 17:24:24.334350 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:24:24 crc kubenswrapper[4818]: W0930 17:24:24.358118 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7178784_85af_469a_8b70_b6949f6580a4.slice/crio-aa406a9918aacd54cd46e13bccf7185253f09a1164f4ab77b87ebefd66404385 WatchSource:0}: Error finding container aa406a9918aacd54cd46e13bccf7185253f09a1164f4ab77b87ebefd66404385: Status 404 returned error can't find the container with id aa406a9918aacd54cd46e13bccf7185253f09a1164f4ab77b87ebefd66404385 Sep 30 17:24:24 crc kubenswrapper[4818]: I0930 17:24:24.497095 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.290297 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"f5634d48-92cc-4cb1-9009-5bcd5ad54179","Type":"ContainerStarted","Data":"f957015d64e75cb3a2ebcdc628dbad48ff6a048cffa6abecd35687d0a0d19a96"} Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.290536 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"f5634d48-92cc-4cb1-9009-5bcd5ad54179","Type":"ContainerStarted","Data":"11339a2da1384ee752135d142d8c7051e0cbebc5fa9d417cf308e901d45a9265"} Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.290666 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.290694 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"f5634d48-92cc-4cb1-9009-5bcd5ad54179","Type":"ContainerStarted","Data":"d477723713b55fd5d7f083e6fa02aa464e961ef4cc01c263eaa32e2ab58da833"} Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.291595 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a7178784-85af-469a-8b70-b6949f6580a4","Type":"ContainerStarted","Data":"730e89915686204547bd5406635b878a0c4d01f7e60c039d4ccf0dd7ba653fb7"} Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.291618 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a7178784-85af-469a-8b70-b6949f6580a4","Type":"ContainerStarted","Data":"aa406a9918aacd54cd46e13bccf7185253f09a1164f4ab77b87ebefd66404385"} Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.295179 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"f6778e25-7694-4fc9-9f75-b28e21e39099","Type":"ContainerStarted","Data":"0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee"} Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.295228 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"f6778e25-7694-4fc9-9f75-b28e21e39099","Type":"ContainerStarted","Data":"36cc98f21ab9bdb6e73ebfbe3db64a2833131a2ca172af033cc059fe426cc611"} Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.314759 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.314737458 podStartE2EDuration="2.314737458s" podCreationTimestamp="2025-09-30 17:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:24:25.313630068 +0000 UTC m=+1512.067901884" watchObservedRunningTime="2025-09-30 17:24:25.314737458 +0000 UTC m=+1512.069009274" Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.336007 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.335991063 podStartE2EDuration="2.335991063s" podCreationTimestamp="2025-09-30 17:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:24:25.332958951 +0000 UTC m=+1512.087230767" watchObservedRunningTime="2025-09-30 17:24:25.335991063 +0000 UTC m=+1512.090262879" Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.356732 4818 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.356715634 podStartE2EDuration="2.356715634s" podCreationTimestamp="2025-09-30 17:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:24:25.351604315 +0000 UTC m=+1512.105876141" watchObservedRunningTime="2025-09-30 17:24:25.356715634 +0000 UTC m=+1512.110987450" Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.650684 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:25 crc kubenswrapper[4818]: I0930 17:24:25.898404 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vg65g"] Sep 30 17:24:26 crc kubenswrapper[4818]: I0930 17:24:26.794523 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:27 crc kubenswrapper[4818]: I0930 17:24:27.315651 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vg65g" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="registry-server" containerID="cri-o://af3a8905b0821470104da568b542524329d233712b2a994da0bb6aed9d9286f1" gracePeriod=2 Sep 30 17:24:27 crc kubenswrapper[4818]: I0930 17:24:27.595319 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.013214 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.326895 4818 generic.go:334] "Generic (PLEG): container finished" podID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerID="af3a8905b0821470104da568b542524329d233712b2a994da0bb6aed9d9286f1" exitCode=0 Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.327208 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vg65g" event={"ID":"fda8aee5-f56f-4875-8ef1-ce8858cdfeba","Type":"ContainerDied","Data":"af3a8905b0821470104da568b542524329d233712b2a994da0bb6aed9d9286f1"} Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.327236 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vg65g" event={"ID":"fda8aee5-f56f-4875-8ef1-ce8858cdfeba","Type":"ContainerDied","Data":"ca1dc6440b60bd55e24e45bead586f8f540139efeecfb8e6974210283ea86fbf"} Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.327250 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca1dc6440b60bd55e24e45bead586f8f540139efeecfb8e6974210283ea86fbf" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.355156 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.526855 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thwpk\" (UniqueName: \"kubernetes.io/projected/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-kube-api-access-thwpk\") pod \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.526983 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-utilities\") pod \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.527041 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-catalog-content\") pod \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\" (UID: \"fda8aee5-f56f-4875-8ef1-ce8858cdfeba\") " Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.528087 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-utilities" (OuterVolumeSpecName: "utilities") pod "fda8aee5-f56f-4875-8ef1-ce8858cdfeba" (UID: "fda8aee5-f56f-4875-8ef1-ce8858cdfeba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.541893 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-kube-api-access-thwpk" (OuterVolumeSpecName: "kube-api-access-thwpk") pod "fda8aee5-f56f-4875-8ef1-ce8858cdfeba" (UID: "fda8aee5-f56f-4875-8ef1-ce8858cdfeba"). InnerVolumeSpecName "kube-api-access-thwpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.569059 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fda8aee5-f56f-4875-8ef1-ce8858cdfeba" (UID: "fda8aee5-f56f-4875-8ef1-ce8858cdfeba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.629128 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.629172 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.629182 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thwpk\" (UniqueName: \"kubernetes.io/projected/fda8aee5-f56f-4875-8ef1-ce8858cdfeba-kube-api-access-thwpk\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.815805 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:28 crc kubenswrapper[4818]: I0930 17:24:28.855191 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:29 crc kubenswrapper[4818]: I0930 17:24:29.235956 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:29 crc kubenswrapper[4818]: I0930 17:24:29.334157 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vg65g" Sep 30 17:24:29 crc kubenswrapper[4818]: I0930 17:24:29.359354 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vg65g"] Sep 30 17:24:29 crc kubenswrapper[4818]: I0930 17:24:29.365467 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vg65g"] Sep 30 17:24:30 crc kubenswrapper[4818]: I0930 17:24:30.030159 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" path="/var/lib/kubelet/pods/fda8aee5-f56f-4875-8ef1-ce8858cdfeba/volumes" Sep 30 17:24:30 crc kubenswrapper[4818]: I0930 17:24:30.453870 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:31 crc kubenswrapper[4818]: I0930 17:24:31.321186 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:31 crc kubenswrapper[4818]: I0930 17:24:31.663936 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:32 crc kubenswrapper[4818]: I0930 17:24:32.892385 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:33 crc kubenswrapper[4818]: I0930 17:24:33.815892 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:33 crc kubenswrapper[4818]: I0930 17:24:33.819721 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:33 crc kubenswrapper[4818]: I0930 17:24:33.861172 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:33 crc kubenswrapper[4818]: I0930 17:24:33.887382 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:33 crc kubenswrapper[4818]: I0930 17:24:33.927459 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:33 crc kubenswrapper[4818]: I0930 17:24:33.972298 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:34 crc kubenswrapper[4818]: I0930 17:24:34.103271 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:34 crc kubenswrapper[4818]: I0930 17:24:34.382754 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:34 crc kubenswrapper[4818]: I0930 17:24:34.388255 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:24:34 crc kubenswrapper[4818]: I0930 17:24:34.408553 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:24:34 crc kubenswrapper[4818]: I0930 17:24:34.411493 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.285157 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.520911 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.844867 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-db-create-44kbl"] Sep 30 17:24:35 crc kubenswrapper[4818]: E0930 17:24:35.845459 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="extract-content" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.845475 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="extract-content" Sep 30 17:24:35 crc kubenswrapper[4818]: E0930 17:24:35.845492 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="registry-server" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.845498 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="registry-server" Sep 30 17:24:35 crc kubenswrapper[4818]: E0930 17:24:35.845510 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="extract-utilities" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 
17:24:35.845516 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="extract-utilities" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.845678 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="fda8aee5-f56f-4875-8ef1-ce8858cdfeba" containerName="registry-server" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.846267 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-44kbl" Sep 30 17:24:35 crc kubenswrapper[4818]: I0930 17:24:35.853517 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-create-44kbl"] Sep 30 17:24:36 crc kubenswrapper[4818]: I0930 17:24:36.021206 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:24:36 crc kubenswrapper[4818]: E0930 17:24:36.021453 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:24:36 crc kubenswrapper[4818]: I0930 17:24:36.046441 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggq67\" (UniqueName: \"kubernetes.io/projected/7b688e71-d6a3-44cc-a380-223370ad26ad-kube-api-access-ggq67\") pod \"cinder-db-create-44kbl\" (UID: \"7b688e71-d6a3-44cc-a380-223370ad26ad\") " pod="watcher-kuttl-default/cinder-db-create-44kbl" Sep 30 17:24:36 crc kubenswrapper[4818]: I0930 17:24:36.148436 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggq67\" (UniqueName: \"kubernetes.io/projected/7b688e71-d6a3-44cc-a380-223370ad26ad-kube-api-access-ggq67\") pod \"cinder-db-create-44kbl\" (UID: \"7b688e71-d6a3-44cc-a380-223370ad26ad\") " pod="watcher-kuttl-default/cinder-db-create-44kbl" Sep 30 17:24:36 crc kubenswrapper[4818]: I0930 17:24:36.176905 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggq67\" (UniqueName: \"kubernetes.io/projected/7b688e71-d6a3-44cc-a380-223370ad26ad-kube-api-access-ggq67\") pod \"cinder-db-create-44kbl\" (UID: \"7b688e71-d6a3-44cc-a380-223370ad26ad\") " pod="watcher-kuttl-default/cinder-db-create-44kbl" Sep 30 17:24:36 crc kubenswrapper[4818]: I0930 17:24:36.462153 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-44kbl" Sep 30 17:24:36 crc kubenswrapper[4818]: I0930 17:24:36.740316 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:36 crc kubenswrapper[4818]: I0930 17:24:36.972853 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-create-44kbl"] Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.009436 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.009719 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-central-agent" containerID="cri-o://ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c" gracePeriod=30 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.010082 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="proxy-httpd" containerID="cri-o://90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91" gracePeriod=30 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.010122 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="sg-core" containerID="cri-o://bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d" gracePeriod=30 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.010155 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-notification-agent" containerID="cri-o://a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096" gracePeriod=30 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.408524 4818 generic.go:334] "Generic (PLEG): container finished" podID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerID="90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91" exitCode=0 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.408554 4818 generic.go:334] "Generic (PLEG): container finished" podID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerID="bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d" exitCode=2 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.408561 4818 generic.go:334] "Generic (PLEG): container finished" podID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerID="ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c" exitCode=0 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.408600 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerDied","Data":"90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91"} Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.408626 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerDied","Data":"bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d"} Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.408635 4818 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerDied","Data":"ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c"} Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.412506 4818 generic.go:334] "Generic (PLEG): container finished" podID="7b688e71-d6a3-44cc-a380-223370ad26ad" containerID="1874469c28fd86902b9653580085c4953c8e52473fec3f05bfebbcc28b9c2103" exitCode=0 Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.412557 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-create-44kbl" event={"ID":"7b688e71-d6a3-44cc-a380-223370ad26ad","Type":"ContainerDied","Data":"1874469c28fd86902b9653580085c4953c8e52473fec3f05bfebbcc28b9c2103"} Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.412592 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-create-44kbl" event={"ID":"7b688e71-d6a3-44cc-a380-223370ad26ad","Type":"ContainerStarted","Data":"4a6b9dab2667c31e19c321e889a6119593d59e3587eb6aee05b2319c4a812a3c"} Sep 30 17:24:37 crc kubenswrapper[4818]: I0930 17:24:37.973409 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:38 crc kubenswrapper[4818]: I0930 17:24:38.798313 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-44kbl" Sep 30 17:24:38 crc kubenswrapper[4818]: I0930 17:24:38.905466 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggq67\" (UniqueName: \"kubernetes.io/projected/7b688e71-d6a3-44cc-a380-223370ad26ad-kube-api-access-ggq67\") pod \"7b688e71-d6a3-44cc-a380-223370ad26ad\" (UID: \"7b688e71-d6a3-44cc-a380-223370ad26ad\") " Sep 30 17:24:38 crc kubenswrapper[4818]: I0930 17:24:38.911337 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b688e71-d6a3-44cc-a380-223370ad26ad-kube-api-access-ggq67" (OuterVolumeSpecName: "kube-api-access-ggq67") pod "7b688e71-d6a3-44cc-a380-223370ad26ad" (UID: "7b688e71-d6a3-44cc-a380-223370ad26ad"). InnerVolumeSpecName "kube-api-access-ggq67". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:39 crc kubenswrapper[4818]: I0930 17:24:39.007224 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggq67\" (UniqueName: \"kubernetes.io/projected/7b688e71-d6a3-44cc-a380-223370ad26ad-kube-api-access-ggq67\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:39 crc kubenswrapper[4818]: I0930 17:24:39.144018 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:39 crc kubenswrapper[4818]: I0930 17:24:39.437712 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-create-44kbl" event={"ID":"7b688e71-d6a3-44cc-a380-223370ad26ad","Type":"ContainerDied","Data":"4a6b9dab2667c31e19c321e889a6119593d59e3587eb6aee05b2319c4a812a3c"} Sep 30 17:24:39 crc kubenswrapper[4818]: I0930 17:24:39.437757 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a6b9dab2667c31e19c321e889a6119593d59e3587eb6aee05b2319c4a812a3c" Sep 30 17:24:39 crc kubenswrapper[4818]: I0930 17:24:39.437827 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-44kbl" Sep 30 17:24:39 crc kubenswrapper[4818]: I0930 17:24:39.899155 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025429 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-scripts\") pod \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025484 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzvwx\" (UniqueName: \"kubernetes.io/projected/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-kube-api-access-dzvwx\") pod \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025505 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-ceilometer-tls-certs\") pod \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025563 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-log-httpd\") pod \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025593 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-config-data\") pod \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025634 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-run-httpd\") pod 
\"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025671 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-sg-core-conf-yaml\") pod \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.025768 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-combined-ca-bundle\") pod \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\" (UID: \"115e3e75-6f62-41c2-80a9-b4b21d63b3ad\") " Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.026417 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.026965 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.029633 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-kube-api-access-dzvwx" (OuterVolumeSpecName: "kube-api-access-dzvwx") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "kube-api-access-dzvwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.043165 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-scripts" (OuterVolumeSpecName: "scripts") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.072662 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.075400 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.088743 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.119669 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-config-data" (OuterVolumeSpecName: "config-data") pod "115e3e75-6f62-41c2-80a9-b4b21d63b3ad" (UID: "115e3e75-6f62-41c2-80a9-b4b21d63b3ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127838 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127867 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127877 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzvwx\" (UniqueName: \"kubernetes.io/projected/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-kube-api-access-dzvwx\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127887 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127895 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127976 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127987 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.127994 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/115e3e75-6f62-41c2-80a9-b4b21d63b3ad-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.342363 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.454528 4818 generic.go:334] "Generic (PLEG): container finished" podID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerID="a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096" 
exitCode=0 Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.454769 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerDied","Data":"a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096"} Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.454977 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"115e3e75-6f62-41c2-80a9-b4b21d63b3ad","Type":"ContainerDied","Data":"e812ba045a93062b46d45760ef60fe575a9a5f51a97dadaf8212f24df89887e6"} Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.454839 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.455027 4818 scope.go:117] "RemoveContainer" containerID="90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.489714 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.501125 4818 scope.go:117] "RemoveContainer" containerID="bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.503140 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.525761 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.526091 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b688e71-d6a3-44cc-a380-223370ad26ad" containerName="mariadb-database-create" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526109 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b688e71-d6a3-44cc-a380-223370ad26ad" containerName="mariadb-database-create" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.526126 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-notification-agent" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526134 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-notification-agent" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.526145 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="sg-core" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526153 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="sg-core" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.526166 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-central-agent" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526171 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-central-agent" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.526183 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="proxy-httpd" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526188 4818 
state_mem.go:107] "Deleted CPUSet assignment" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="proxy-httpd" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526341 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="proxy-httpd" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526349 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-notification-agent" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526364 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b688e71-d6a3-44cc-a380-223370ad26ad" containerName="mariadb-database-create" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526371 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="ceilometer-central-agent" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.526381 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" containerName="sg-core" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.527775 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.531885 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.532184 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.532415 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.540712 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.541350 4818 scope.go:117] "RemoveContainer" containerID="a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.569506 4818 scope.go:117] "RemoveContainer" containerID="ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.589455 4818 scope.go:117] "RemoveContainer" containerID="90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.589835 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91\": container with ID starting with 90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91 not found: ID does not exist" containerID="90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.589876 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91"} err="failed to get container status \"90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91\": rpc error: code = NotFound desc = could not find container \"90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91\": container with ID starting with 
90f437f7355d593fc809853050fc4f0ec8fad13d7f0be3ea7291bc93d3f2ee91 not found: ID does not exist" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.589895 4818 scope.go:117] "RemoveContainer" containerID="bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.590220 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d\": container with ID starting with bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d not found: ID does not exist" containerID="bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.590247 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d"} err="failed to get container status \"bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d\": rpc error: code = NotFound desc = could not find container \"bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d\": container with ID starting with bd6b5ff08f05e420c69c13ca88b8d01ca319ff85ffca77776d815a668c7b6b2d not found: ID does not exist" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.590265 4818 scope.go:117] "RemoveContainer" containerID="a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.590654 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096\": container with ID starting with a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096 not found: ID does not exist" containerID="a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.590674 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096"} err="failed to get container status \"a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096\": rpc error: code = NotFound desc = could not find container \"a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096\": container with ID starting with a1358b0acd8da7bc2e6cbcf0282718bf2f2089d53a207ccad6314d59c3ced096 not found: ID does not exist" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.590687 4818 scope.go:117] "RemoveContainer" containerID="ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c" Sep 30 17:24:40 crc kubenswrapper[4818]: E0930 17:24:40.591113 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c\": container with ID starting with ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c not found: ID does not exist" containerID="ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.591134 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c"} err="failed to get container status \"ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c\": rpc 
error: code = NotFound desc = could not find container \"ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c\": container with ID starting with ecebfeab2a570638762e2eda6c40bbb0e0a17383f2d701272e997bc92bec699c not found: ID does not exist" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.637885 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-scripts\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.638106 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.638267 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-config-data\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.638312 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-run-httpd\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.638371 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-log-httpd\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.638483 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f6cj\" (UniqueName: \"kubernetes.io/projected/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-kube-api-access-7f6cj\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.638613 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.638696 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.740378 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-scripts\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.740494 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.740546 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-config-data\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.740569 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-run-httpd\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.740593 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-log-httpd\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.740632 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f6cj\" (UniqueName: \"kubernetes.io/projected/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-kube-api-access-7f6cj\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.740972 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.741244 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-run-httpd\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.741344 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-log-httpd\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.741691 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.746419 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.746536 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.749265 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.750328 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-config-data\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.753452 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-scripts\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.761741 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f6cj\" (UniqueName: \"kubernetes.io/projected/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-kube-api-access-7f6cj\") pod \"ceilometer-0\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:40 crc kubenswrapper[4818]: I0930 17:24:40.848535 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:41 crc kubenswrapper[4818]: I0930 17:24:41.329648 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:24:41 crc kubenswrapper[4818]: W0930 17:24:41.335900 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e83d5a3_fda5_4a18_bbf4_ac4e6a28014f.slice/crio-98aff47452a9135c392cc83e8f4d6c18126e138f0ae4da2b7ac87ea4e24eb08b WatchSource:0}: Error finding container 98aff47452a9135c392cc83e8f4d6c18126e138f0ae4da2b7ac87ea4e24eb08b: Status 404 returned error can't find the container with id 98aff47452a9135c392cc83e8f4d6c18126e138f0ae4da2b7ac87ea4e24eb08b Sep 30 17:24:41 crc kubenswrapper[4818]: I0930 17:24:41.337961 4818 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 30 17:24:41 crc kubenswrapper[4818]: I0930 17:24:41.464265 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerStarted","Data":"98aff47452a9135c392cc83e8f4d6c18126e138f0ae4da2b7ac87ea4e24eb08b"} Sep 30 17:24:41 crc kubenswrapper[4818]: I0930 17:24:41.513767 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:42 crc kubenswrapper[4818]: I0930 17:24:42.029776 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="115e3e75-6f62-41c2-80a9-b4b21d63b3ad" path="/var/lib/kubelet/pods/115e3e75-6f62-41c2-80a9-b4b21d63b3ad/volumes" Sep 30 17:24:42 crc kubenswrapper[4818]: I0930 17:24:42.474131 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerStarted","Data":"f9d269a88531a325af77e204a349f1a4e83f80539d65177d99d2449ac1fc6562"} Sep 30 17:24:42 crc kubenswrapper[4818]: I0930 17:24:42.699069 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:43 crc kubenswrapper[4818]: I0930 17:24:43.487736 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerStarted","Data":"5bdd18722c833531c7421f248f5aa6befa10ec60906dc839d1dd31024103f5d5"} Sep 30 17:24:43 crc kubenswrapper[4818]: I0930 17:24:43.487785 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerStarted","Data":"b76379470dffe183f2b18fa05822eb74ab83756077433ba1cbbee3c8e6ea118d"} Sep 30 17:24:43 crc kubenswrapper[4818]: I0930 17:24:43.884844 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:45 crc kubenswrapper[4818]: I0930 17:24:45.053139 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:45 crc kubenswrapper[4818]: I0930 17:24:45.514841 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerStarted","Data":"f1bebd7390532617ddf99abd6c925fe5a652476aa71ad38bb78a3ad9358204ac"} Sep 30 17:24:45 crc kubenswrapper[4818]: I0930 17:24:45.558681 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.502465032 podStartE2EDuration="5.55866322s" podCreationTimestamp="2025-09-30 17:24:40 +0000 UTC" firstStartedPulling="2025-09-30 17:24:41.337691071 +0000 UTC m=+1528.091962887" lastFinishedPulling="2025-09-30 17:24:44.393889259 +0000 UTC m=+1531.148161075" observedRunningTime="2025-09-30 17:24:45.557509399 +0000 UTC m=+1532.311781225" watchObservedRunningTime="2025-09-30 17:24:45.55866322 +0000 UTC m=+1532.312935036" Sep 30 17:24:45 crc kubenswrapper[4818]: I0930 17:24:45.914459 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-415e-account-create-4h58z"] Sep 30 17:24:45 crc kubenswrapper[4818]: I0930 17:24:45.915713 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" Sep 30 17:24:45 crc kubenswrapper[4818]: I0930 17:24:45.919283 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-db-secret" Sep 30 17:24:45 crc kubenswrapper[4818]: I0930 17:24:45.975798 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-415e-account-create-4h58z"] Sep 30 17:24:46 crc kubenswrapper[4818]: I0930 17:24:46.102007 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2tkx\" (UniqueName: \"kubernetes.io/projected/50baa69b-1cfe-4b25-8186-5a21b7d9c889-kube-api-access-r2tkx\") pod \"cinder-415e-account-create-4h58z\" (UID: \"50baa69b-1cfe-4b25-8186-5a21b7d9c889\") " pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" Sep 30 17:24:46 crc kubenswrapper[4818]: I0930 17:24:46.203880 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2tkx\" (UniqueName: \"kubernetes.io/projected/50baa69b-1cfe-4b25-8186-5a21b7d9c889-kube-api-access-r2tkx\") pod \"cinder-415e-account-create-4h58z\" (UID: \"50baa69b-1cfe-4b25-8186-5a21b7d9c889\") " pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" Sep 30 17:24:46 crc kubenswrapper[4818]: I0930 17:24:46.227083 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2tkx\" (UniqueName: \"kubernetes.io/projected/50baa69b-1cfe-4b25-8186-5a21b7d9c889-kube-api-access-r2tkx\") pod \"cinder-415e-account-create-4h58z\" (UID: \"50baa69b-1cfe-4b25-8186-5a21b7d9c889\") " pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" Sep 30 17:24:46 crc kubenswrapper[4818]: I0930 17:24:46.240332 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" Sep 30 17:24:46 crc kubenswrapper[4818]: I0930 17:24:46.245501 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:46 crc kubenswrapper[4818]: I0930 17:24:46.523006 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:24:46 crc kubenswrapper[4818]: I0930 17:24:46.710465 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-415e-account-create-4h58z"] Sep 30 17:24:47 crc kubenswrapper[4818]: I0930 17:24:47.482571 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:47 crc kubenswrapper[4818]: I0930 17:24:47.539719 4818 generic.go:334] "Generic (PLEG): container finished" podID="50baa69b-1cfe-4b25-8186-5a21b7d9c889" containerID="03712bb8cdcdc7a35c6a20dc3c6225800a9bbd4a22a43c01f7e5a377042bdf39" exitCode=0 Sep 30 17:24:47 crc kubenswrapper[4818]: I0930 17:24:47.539782 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" event={"ID":"50baa69b-1cfe-4b25-8186-5a21b7d9c889","Type":"ContainerDied","Data":"03712bb8cdcdc7a35c6a20dc3c6225800a9bbd4a22a43c01f7e5a377042bdf39"} Sep 30 17:24:47 crc kubenswrapper[4818]: I0930 17:24:47.539852 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" event={"ID":"50baa69b-1cfe-4b25-8186-5a21b7d9c889","Type":"ContainerStarted","Data":"b59c2c3ad925c5ff92b4e5cbfff0b7c25fd67503cf6c2308b1229ff59d199151"} Sep 30 17:24:48 crc kubenswrapper[4818]: I0930 17:24:48.728086 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:48 crc kubenswrapper[4818]: I0930 17:24:48.943863 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" Sep 30 17:24:48 crc kubenswrapper[4818]: I0930 17:24:48.951042 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2tkx\" (UniqueName: \"kubernetes.io/projected/50baa69b-1cfe-4b25-8186-5a21b7d9c889-kube-api-access-r2tkx\") pod \"50baa69b-1cfe-4b25-8186-5a21b7d9c889\" (UID: \"50baa69b-1cfe-4b25-8186-5a21b7d9c889\") " Sep 30 17:24:48 crc kubenswrapper[4818]: I0930 17:24:48.959946 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50baa69b-1cfe-4b25-8186-5a21b7d9c889-kube-api-access-r2tkx" (OuterVolumeSpecName: "kube-api-access-r2tkx") pod "50baa69b-1cfe-4b25-8186-5a21b7d9c889" (UID: "50baa69b-1cfe-4b25-8186-5a21b7d9c889"). InnerVolumeSpecName "kube-api-access-r2tkx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:24:49 crc kubenswrapper[4818]: I0930 17:24:49.052897 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2tkx\" (UniqueName: \"kubernetes.io/projected/50baa69b-1cfe-4b25-8186-5a21b7d9c889-kube-api-access-r2tkx\") on node \"crc\" DevicePath \"\"" Sep 30 17:24:49 crc kubenswrapper[4818]: I0930 17:24:49.561948 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" event={"ID":"50baa69b-1cfe-4b25-8186-5a21b7d9c889","Type":"ContainerDied","Data":"b59c2c3ad925c5ff92b4e5cbfff0b7c25fd67503cf6c2308b1229ff59d199151"} Sep 30 17:24:49 crc kubenswrapper[4818]: I0930 17:24:49.561993 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b59c2c3ad925c5ff92b4e5cbfff0b7c25fd67503cf6c2308b1229ff59d199151" Sep 30 17:24:49 crc kubenswrapper[4818]: I0930 17:24:49.562036 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-415e-account-create-4h58z" Sep 30 17:24:49 crc kubenswrapper[4818]: I0930 17:24:49.923122 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:50 crc kubenswrapper[4818]: I0930 17:24:50.022106 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:24:50 crc kubenswrapper[4818]: E0930 17:24:50.022735 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.132526 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.153347 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-db-sync-fns5p"] Sep 30 17:24:51 crc kubenswrapper[4818]: E0930 17:24:51.153679 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50baa69b-1cfe-4b25-8186-5a21b7d9c889" containerName="mariadb-account-create" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.153699 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="50baa69b-1cfe-4b25-8186-5a21b7d9c889" containerName="mariadb-account-create" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.153947 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="50baa69b-1cfe-4b25-8186-5a21b7d9c889" containerName="mariadb-account-create" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.154612 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.156294 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scripts" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.156510 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-cinder-dockercfg-qxzts" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.157277 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-config-data" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.168186 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-fns5p"] Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.190074 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vr7st\" (UniqueName: \"kubernetes.io/projected/fe2377e1-d738-46b3-8f4c-d22503a2a648-kube-api-access-vr7st\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.190136 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-db-sync-config-data\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.190176 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe2377e1-d738-46b3-8f4c-d22503a2a648-etc-machine-id\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.190218 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-config-data\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.190250 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-scripts\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.190373 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-combined-ca-bundle\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.291823 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-config-data\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " 
pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.291910 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-scripts\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.292017 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-combined-ca-bundle\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.292710 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vr7st\" (UniqueName: \"kubernetes.io/projected/fe2377e1-d738-46b3-8f4c-d22503a2a648-kube-api-access-vr7st\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.292787 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-db-sync-config-data\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.292835 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe2377e1-d738-46b3-8f4c-d22503a2a648-etc-machine-id\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.292958 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe2377e1-d738-46b3-8f4c-d22503a2a648-etc-machine-id\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.300632 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-scripts\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.300632 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-db-sync-config-data\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.300788 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-combined-ca-bundle\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.300988 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-config-data\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.309516 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vr7st\" (UniqueName: \"kubernetes.io/projected/fe2377e1-d738-46b3-8f4c-d22503a2a648-kube-api-access-vr7st\") pod \"cinder-db-sync-fns5p\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.472601 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:24:51 crc kubenswrapper[4818]: I0930 17:24:51.914315 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-fns5p"] Sep 30 17:24:52 crc kubenswrapper[4818]: I0930 17:24:52.332234 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:52 crc kubenswrapper[4818]: I0930 17:24:52.594241 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-fns5p" event={"ID":"fe2377e1-d738-46b3-8f4c-d22503a2a648","Type":"ContainerStarted","Data":"bf769164353a126e2d135475af57ccdc92e2aaa1abe47e306d2cfaddcd161f51"} Sep 30 17:24:53 crc kubenswrapper[4818]: I0930 17:24:53.536672 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:54 crc kubenswrapper[4818]: I0930 17:24:54.761389 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:55 crc kubenswrapper[4818]: I0930 17:24:55.962801 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:57 crc kubenswrapper[4818]: I0930 17:24:57.161299 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:58 crc kubenswrapper[4818]: I0930 17:24:58.359892 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:24:59 crc kubenswrapper[4818]: I0930 17:24:59.558482 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:00 crc kubenswrapper[4818]: I0930 17:25:00.755809 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:01 crc kubenswrapper[4818]: I0930 17:25:01.020671 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:25:01 
crc kubenswrapper[4818]: E0930 17:25:01.021023 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:25:01 crc kubenswrapper[4818]: I0930 17:25:01.968245 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:03 crc kubenswrapper[4818]: I0930 17:25:03.177565 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:04 crc kubenswrapper[4818]: I0930 17:25:04.422196 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:05 crc kubenswrapper[4818]: I0930 17:25:05.620313 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:06 crc kubenswrapper[4818]: I0930 17:25:06.839870 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:07 crc kubenswrapper[4818]: I0930 17:25:07.757418 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-fns5p" event={"ID":"fe2377e1-d738-46b3-8f4c-d22503a2a648","Type":"ContainerStarted","Data":"c38d84203642ce7e577b1c2f01c9149000e12173cbdcc51807297f44c17d74f6"} Sep 30 17:25:07 crc kubenswrapper[4818]: I0930 17:25:07.812015 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-db-sync-fns5p" podStartSLOduration=2.379877685 podStartE2EDuration="16.811986201s" podCreationTimestamp="2025-09-30 17:24:51 +0000 UTC" firstStartedPulling="2025-09-30 17:24:51.93068422 +0000 UTC m=+1538.684956036" lastFinishedPulling="2025-09-30 17:25:06.362792736 +0000 UTC m=+1553.117064552" observedRunningTime="2025-09-30 17:25:07.801385744 +0000 UTC m=+1554.555657580" watchObservedRunningTime="2025-09-30 17:25:07.811986201 +0000 UTC m=+1554.566258047" Sep 30 17:25:08 crc kubenswrapper[4818]: I0930 17:25:08.050962 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:09 crc kubenswrapper[4818]: I0930 17:25:09.269364 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:10 crc kubenswrapper[4818]: I0930 17:25:10.502027 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:10 crc kubenswrapper[4818]: I0930 17:25:10.865574 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:11 crc kubenswrapper[4818]: E0930 17:25:11.105477 4818 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe2377e1_d738_46b3_8f4c_d22503a2a648.slice/crio-c38d84203642ce7e577b1c2f01c9149000e12173cbdcc51807297f44c17d74f6.scope\": RecentStats: unable to find data in memory cache]" Sep 30 17:25:11 crc kubenswrapper[4818]: I0930 17:25:11.683794 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:11 crc kubenswrapper[4818]: I0930 17:25:11.789297 4818 generic.go:334] "Generic (PLEG): container finished" podID="fe2377e1-d738-46b3-8f4c-d22503a2a648" containerID="c38d84203642ce7e577b1c2f01c9149000e12173cbdcc51807297f44c17d74f6" exitCode=0 Sep 30 17:25:11 crc kubenswrapper[4818]: I0930 17:25:11.789341 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-fns5p" event={"ID":"fe2377e1-d738-46b3-8f4c-d22503a2a648","Type":"ContainerDied","Data":"c38d84203642ce7e577b1c2f01c9149000e12173cbdcc51807297f44c17d74f6"} Sep 30 17:25:12 crc kubenswrapper[4818]: I0930 17:25:12.905533 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.155022 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.214273 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe2377e1-d738-46b3-8f4c-d22503a2a648-etc-machine-id\") pod \"fe2377e1-d738-46b3-8f4c-d22503a2a648\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.214386 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-config-data\") pod \"fe2377e1-d738-46b3-8f4c-d22503a2a648\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.214418 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fe2377e1-d738-46b3-8f4c-d22503a2a648-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "fe2377e1-d738-46b3-8f4c-d22503a2a648" (UID: "fe2377e1-d738-46b3-8f4c-d22503a2a648"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.214465 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vr7st\" (UniqueName: \"kubernetes.io/projected/fe2377e1-d738-46b3-8f4c-d22503a2a648-kube-api-access-vr7st\") pod \"fe2377e1-d738-46b3-8f4c-d22503a2a648\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.214516 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-scripts\") pod \"fe2377e1-d738-46b3-8f4c-d22503a2a648\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.214563 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-db-sync-config-data\") pod \"fe2377e1-d738-46b3-8f4c-d22503a2a648\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.214640 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-combined-ca-bundle\") pod \"fe2377e1-d738-46b3-8f4c-d22503a2a648\" (UID: \"fe2377e1-d738-46b3-8f4c-d22503a2a648\") " Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.215180 4818 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe2377e1-d738-46b3-8f4c-d22503a2a648-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.239178 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-scripts" (OuterVolumeSpecName: "scripts") pod "fe2377e1-d738-46b3-8f4c-d22503a2a648" (UID: "fe2377e1-d738-46b3-8f4c-d22503a2a648"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.252066 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "fe2377e1-d738-46b3-8f4c-d22503a2a648" (UID: "fe2377e1-d738-46b3-8f4c-d22503a2a648"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.252804 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe2377e1-d738-46b3-8f4c-d22503a2a648-kube-api-access-vr7st" (OuterVolumeSpecName: "kube-api-access-vr7st") pod "fe2377e1-d738-46b3-8f4c-d22503a2a648" (UID: "fe2377e1-d738-46b3-8f4c-d22503a2a648"). InnerVolumeSpecName "kube-api-access-vr7st". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.280512 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe2377e1-d738-46b3-8f4c-d22503a2a648" (UID: "fe2377e1-d738-46b3-8f4c-d22503a2a648"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.298224 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-config-data" (OuterVolumeSpecName: "config-data") pod "fe2377e1-d738-46b3-8f4c-d22503a2a648" (UID: "fe2377e1-d738-46b3-8f4c-d22503a2a648"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.315871 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vr7st\" (UniqueName: \"kubernetes.io/projected/fe2377e1-d738-46b3-8f4c-d22503a2a648-kube-api-access-vr7st\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.315893 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.315903 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.315912 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.315933 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe2377e1-d738-46b3-8f4c-d22503a2a648-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.813396 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-fns5p" event={"ID":"fe2377e1-d738-46b3-8f4c-d22503a2a648","Type":"ContainerDied","Data":"bf769164353a126e2d135475af57ccdc92e2aaa1abe47e306d2cfaddcd161f51"} Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.813443 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf769164353a126e2d135475af57ccdc92e2aaa1abe47e306d2cfaddcd161f51" Sep 30 17:25:13 crc kubenswrapper[4818]: I0930 17:25:13.813466 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-fns5p" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.156969 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.203126 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:14 crc kubenswrapper[4818]: E0930 17:25:14.203506 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe2377e1-d738-46b3-8f4c-d22503a2a648" containerName="cinder-db-sync" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.203525 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe2377e1-d738-46b3-8f4c-d22503a2a648" containerName="cinder-db-sync" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.203703 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe2377e1-d738-46b3-8f4c-d22503a2a648" containerName="cinder-db-sync" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.204636 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.207189 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-cinder-dockercfg-qxzts" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.207436 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scripts" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.207559 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.208951 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scheduler-config-data" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.209011 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-config-data" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.210507 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.214982 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-backup-config-data" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.218091 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228048 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-dev\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228083 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-run\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228102 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-scripts\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228118 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228241 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-lib-modules\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228315 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228359 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228384 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: 
I0930 17:25:14.228417 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data-custom\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228496 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228559 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228590 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-scripts\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228624 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228654 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228684 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228708 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228796 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 
17:25:14.228896 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-nvme\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228935 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m9gd\" (UniqueName: \"kubernetes.io/projected/d72ee158-f9de-476e-8706-d8197d5bc5d8-kube-api-access-9m9gd\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228952 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-sys\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228968 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zr2px\" (UniqueName: \"kubernetes.io/projected/c294afe4-ca9a-4d7a-8069-a46a2cceab60-kube-api-access-zr2px\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.228985 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d72ee158-f9de-476e-8706-d8197d5bc5d8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.229000 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.244076 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330216 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-lib-modules\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330266 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330290 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-cinder\") pod \"cinder-backup-0\" (UID: 
\"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330307 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330330 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-lib-modules\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330354 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data-custom\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330408 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330438 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330456 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-scripts\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330475 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330493 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330509 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330524 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330541 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330572 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330578 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-nvme\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330621 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-sys\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330640 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m9gd\" (UniqueName: \"kubernetes.io/projected/d72ee158-f9de-476e-8706-d8197d5bc5d8-kube-api-access-9m9gd\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330665 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zr2px\" (UniqueName: \"kubernetes.io/projected/c294afe4-ca9a-4d7a-8069-a46a2cceab60-kube-api-access-zr2px\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330688 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d72ee158-f9de-476e-8706-d8197d5bc5d8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330704 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330736 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-dev\") pod 
\"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330753 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-run\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330773 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-scripts\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330796 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.331026 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.331119 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.331145 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-sys\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.330666 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-nvme\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.331524 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d72ee158-f9de-476e-8706-d8197d5bc5d8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.332039 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-dev\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.332059 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: 
\"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-run\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.333645 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.334711 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.335068 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.335760 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.336686 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data-custom\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.338267 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.339562 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.340002 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-scripts\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.341170 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-scripts\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.341301 4818 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.341428 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.341626 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.354451 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m9gd\" (UniqueName: \"kubernetes.io/projected/d72ee158-f9de-476e-8706-d8197d5bc5d8-kube-api-access-9m9gd\") pod \"cinder-scheduler-0\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.378541 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zr2px\" (UniqueName: \"kubernetes.io/projected/c294afe4-ca9a-4d7a-8069-a46a2cceab60-kube-api-access-zr2px\") pod \"cinder-backup-0\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.383215 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.395770 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.401182 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.410240 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-api-config-data" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.434992 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-scripts\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.435055 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q2hr\" (UniqueName: \"kubernetes.io/projected/08320b49-7a37-4b7f-9f70-00deedea776b-kube-api-access-8q2hr\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.435124 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08320b49-7a37-4b7f-9f70-00deedea776b-logs\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.435147 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.435169 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.435222 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.435241 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/08320b49-7a37-4b7f-9f70-00deedea776b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.435267 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data-custom\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 
17:25:14.528911 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.539827 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-scripts\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.539872 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q2hr\" (UniqueName: \"kubernetes.io/projected/08320b49-7a37-4b7f-9f70-00deedea776b-kube-api-access-8q2hr\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.539906 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08320b49-7a37-4b7f-9f70-00deedea776b-logs\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.539936 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.539957 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.539997 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.540013 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/08320b49-7a37-4b7f-9f70-00deedea776b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.540030 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data-custom\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.540123 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/08320b49-7a37-4b7f-9f70-00deedea776b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.540748 4818 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08320b49-7a37-4b7f-9f70-00deedea776b-logs\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.544473 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-scripts\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.544667 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.545839 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data-custom\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.547552 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.548381 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.558442 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.565492 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q2hr\" (UniqueName: \"kubernetes.io/projected/08320b49-7a37-4b7f-9f70-00deedea776b-kube-api-access-8q2hr\") pod \"cinder-api-0\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:14 crc kubenswrapper[4818]: I0930 17:25:14.755052 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.021337 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:25:15 crc kubenswrapper[4818]: E0930 17:25:15.021558 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.080562 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:15 crc kubenswrapper[4818]: W0930 17:25:15.090424 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc294afe4_ca9a_4d7a_8069_a46a2cceab60.slice/crio-e9156e51e06061c665bb4848e3915fdbaa5119ba630deadadc2ab9506d72f8a5 WatchSource:0}: Error finding container e9156e51e06061c665bb4848e3915fdbaa5119ba630deadadc2ab9506d72f8a5: Status 404 returned error can't find the container with id e9156e51e06061c665bb4848e3915fdbaa5119ba630deadadc2ab9506d72f8a5 Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.152913 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.315323 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:15 crc kubenswrapper[4818]: W0930 17:25:15.319342 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08320b49_7a37_4b7f_9f70_00deedea776b.slice/crio-12c61404c1788b01c7031e0d7be41aca33122c5c8dbd46b7dfbb8685d02f126d WatchSource:0}: Error finding container 12c61404c1788b01c7031e0d7be41aca33122c5c8dbd46b7dfbb8685d02f126d: Status 404 returned error can't find the container with id 12c61404c1788b01c7031e0d7be41aca33122c5c8dbd46b7dfbb8685d02f126d Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.353840 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.844352 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"08320b49-7a37-4b7f-9f70-00deedea776b","Type":"ContainerStarted","Data":"12c61404c1788b01c7031e0d7be41aca33122c5c8dbd46b7dfbb8685d02f126d"} Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.852827 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"c294afe4-ca9a-4d7a-8069-a46a2cceab60","Type":"ContainerStarted","Data":"e9156e51e06061c665bb4848e3915fdbaa5119ba630deadadc2ab9506d72f8a5"} Sep 30 17:25:15 crc kubenswrapper[4818]: I0930 17:25:15.886020 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"d72ee158-f9de-476e-8706-d8197d5bc5d8","Type":"ContainerStarted","Data":"452f29ea8dd2c827058dfae2be8005d2f91d0774962158613668b2508f72cea6"} Sep 30 17:25:16 crc kubenswrapper[4818]: I0930 
17:25:16.562377 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:16 crc kubenswrapper[4818]: I0930 17:25:16.806334 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:16 crc kubenswrapper[4818]: I0930 17:25:16.902531 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"d72ee158-f9de-476e-8706-d8197d5bc5d8","Type":"ContainerStarted","Data":"860e94d6cb2536e91fde4fccb55c68ffe28066b16960b0522cb67b16d55c323b"} Sep 30 17:25:16 crc kubenswrapper[4818]: I0930 17:25:16.904125 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"08320b49-7a37-4b7f-9f70-00deedea776b","Type":"ContainerStarted","Data":"4d203e46fbdb2153559f8cdaeba6487e1fd4d52f47092b8d4d11500e2bf68d02"} Sep 30 17:25:16 crc kubenswrapper[4818]: I0930 17:25:16.907838 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"c294afe4-ca9a-4d7a-8069-a46a2cceab60","Type":"ContainerStarted","Data":"173eff3f7daf59ce0729e3611a0f1be325ccbc158e1d27179a61713c3f4e62c0"} Sep 30 17:25:16 crc kubenswrapper[4818]: I0930 17:25:16.907882 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"c294afe4-ca9a-4d7a-8069-a46a2cceab60","Type":"ContainerStarted","Data":"435f53bf1e26bee2c441fa72f527f629c0a0db104d2704c7aa5e4b94dc0eae79"} Sep 30 17:25:16 crc kubenswrapper[4818]: I0930 17:25:16.937329 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-backup-0" podStartSLOduration=1.870111015 podStartE2EDuration="2.937312624s" podCreationTimestamp="2025-09-30 17:25:14 +0000 UTC" firstStartedPulling="2025-09-30 17:25:15.098996634 +0000 UTC m=+1561.853268450" lastFinishedPulling="2025-09-30 17:25:16.166198243 +0000 UTC m=+1562.920470059" observedRunningTime="2025-09-30 17:25:16.931733443 +0000 UTC m=+1563.686005259" watchObservedRunningTime="2025-09-30 17:25:16.937312624 +0000 UTC m=+1563.691584440" Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.785263 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.941408 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"d72ee158-f9de-476e-8706-d8197d5bc5d8","Type":"ContainerStarted","Data":"a64e063371f1aa14bb3a444dfca61048359dc1aa94cfb719ab3fb75ef1ff3ccd"} Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.946041 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"08320b49-7a37-4b7f-9f70-00deedea776b","Type":"ContainerStarted","Data":"c85711c0255ba4303c0d6b1f91db00c63f460752cdc2f8da8977f9981060b9e2"} Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.946112 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api" containerID="cri-o://c85711c0255ba4303c0d6b1f91db00c63f460752cdc2f8da8977f9981060b9e2" gracePeriod=30 Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.946144 4818 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.946075 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api-log" containerID="cri-o://4d203e46fbdb2153559f8cdaeba6487e1fd4d52f47092b8d4d11500e2bf68d02" gracePeriod=30 Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.973800 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-scheduler-0" podStartSLOduration=3.062859132 podStartE2EDuration="3.973783524s" podCreationTimestamp="2025-09-30 17:25:14 +0000 UTC" firstStartedPulling="2025-09-30 17:25:15.197103278 +0000 UTC m=+1561.951375094" lastFinishedPulling="2025-09-30 17:25:16.10802767 +0000 UTC m=+1562.862299486" observedRunningTime="2025-09-30 17:25:17.967089333 +0000 UTC m=+1564.721361169" watchObservedRunningTime="2025-09-30 17:25:17.973783524 +0000 UTC m=+1564.728055330" Sep 30 17:25:17 crc kubenswrapper[4818]: I0930 17:25:17.994971 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-api-0" podStartSLOduration=3.994954757 podStartE2EDuration="3.994954757s" podCreationTimestamp="2025-09-30 17:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:25:17.992014697 +0000 UTC m=+1564.746286513" watchObservedRunningTime="2025-09-30 17:25:17.994954757 +0000 UTC m=+1564.749226573" Sep 30 17:25:18 crc kubenswrapper[4818]: I0930 17:25:18.972222 4818 generic.go:334] "Generic (PLEG): container finished" podID="08320b49-7a37-4b7f-9f70-00deedea776b" containerID="c85711c0255ba4303c0d6b1f91db00c63f460752cdc2f8da8977f9981060b9e2" exitCode=0 Sep 30 17:25:18 crc kubenswrapper[4818]: I0930 17:25:18.972751 4818 generic.go:334] "Generic (PLEG): container finished" podID="08320b49-7a37-4b7f-9f70-00deedea776b" containerID="4d203e46fbdb2153559f8cdaeba6487e1fd4d52f47092b8d4d11500e2bf68d02" exitCode=143 Sep 30 17:25:18 crc kubenswrapper[4818]: I0930 17:25:18.972297 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"08320b49-7a37-4b7f-9f70-00deedea776b","Type":"ContainerDied","Data":"c85711c0255ba4303c0d6b1f91db00c63f460752cdc2f8da8977f9981060b9e2"} Sep 30 17:25:18 crc kubenswrapper[4818]: I0930 17:25:18.972800 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"08320b49-7a37-4b7f-9f70-00deedea776b","Type":"ContainerDied","Data":"4d203e46fbdb2153559f8cdaeba6487e1fd4d52f47092b8d4d11500e2bf68d02"} Sep 30 17:25:18 crc kubenswrapper[4818]: I0930 17:25:18.992458 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.161302 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.254987 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data-custom\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255040 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8q2hr\" (UniqueName: \"kubernetes.io/projected/08320b49-7a37-4b7f-9f70-00deedea776b-kube-api-access-8q2hr\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255092 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08320b49-7a37-4b7f-9f70-00deedea776b-logs\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255115 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-cert-memcached-mtls\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255148 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255290 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/08320b49-7a37-4b7f-9f70-00deedea776b-etc-machine-id\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255313 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-scripts\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255341 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-combined-ca-bundle\") pod \"08320b49-7a37-4b7f-9f70-00deedea776b\" (UID: \"08320b49-7a37-4b7f-9f70-00deedea776b\") " Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.255835 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08320b49-7a37-4b7f-9f70-00deedea776b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.256293 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08320b49-7a37-4b7f-9f70-00deedea776b-logs" (OuterVolumeSpecName: "logs") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.259869 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-scripts" (OuterVolumeSpecName: "scripts") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.275022 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08320b49-7a37-4b7f-9f70-00deedea776b-kube-api-access-8q2hr" (OuterVolumeSpecName: "kube-api-access-8q2hr") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "kube-api-access-8q2hr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.285561 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.292073 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.329438 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.335950 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data" (OuterVolumeSpecName: "config-data") pod "08320b49-7a37-4b7f-9f70-00deedea776b" (UID: "08320b49-7a37-4b7f-9f70-00deedea776b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357218 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08320b49-7a37-4b7f-9f70-00deedea776b-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357248 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357258 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357268 4818 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/08320b49-7a37-4b7f-9f70-00deedea776b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357278 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357287 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357295 4818 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/08320b49-7a37-4b7f-9f70-00deedea776b-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.357304 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8q2hr\" (UniqueName: \"kubernetes.io/projected/08320b49-7a37-4b7f-9f70-00deedea776b-kube-api-access-8q2hr\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.529953 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.559429 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.986402 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"08320b49-7a37-4b7f-9f70-00deedea776b","Type":"ContainerDied","Data":"12c61404c1788b01c7031e0d7be41aca33122c5c8dbd46b7dfbb8685d02f126d"} Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.986516 4818 scope.go:117] "RemoveContainer" containerID="c85711c0255ba4303c0d6b1f91db00c63f460752cdc2f8da8977f9981060b9e2" Sep 30 17:25:19 crc kubenswrapper[4818]: I0930 17:25:19.986423 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.019055 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.033126 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.065147 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:20 crc kubenswrapper[4818]: E0930 17:25:20.065512 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.065527 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api" Sep 30 17:25:20 crc kubenswrapper[4818]: E0930 17:25:20.065552 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api-log" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.065558 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api-log" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.065726 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api-log" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.065747 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" containerName="cinder-api" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.066698 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.069891 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-cinder-internal-svc" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.070258 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-api-config-data" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.076651 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.088134 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-cinder-public-svc" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.168748 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwcqm\" (UniqueName: \"kubernetes.io/projected/3351d4d0-c886-4477-bc07-427cc064b4f7-kube-api-access-lwcqm\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169070 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169182 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-scripts\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169288 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169387 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351d4d0-c886-4477-bc07-427cc064b4f7-logs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169540 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3351d4d0-c886-4477-bc07-427cc064b4f7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169597 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data-custom\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169622 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169647 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.169712 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.236833 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.271502 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3351d4d0-c886-4477-bc07-427cc064b4f7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.271562 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data-custom\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.271579 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.271595 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.271617 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.271644 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3351d4d0-c886-4477-bc07-427cc064b4f7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " 
pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.271652 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwcqm\" (UniqueName: \"kubernetes.io/projected/3351d4d0-c886-4477-bc07-427cc064b4f7-kube-api-access-lwcqm\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.272072 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.272101 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-scripts\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.272185 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.272213 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351d4d0-c886-4477-bc07-427cc064b4f7-logs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.272693 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351d4d0-c886-4477-bc07-427cc064b4f7-logs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.275551 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data-custom\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.275613 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.275889 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.275985 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-internal-tls-certs\") 
pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.276000 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.276011 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-scripts\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.291467 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.300833 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwcqm\" (UniqueName: \"kubernetes.io/projected/3351d4d0-c886-4477-bc07-427cc064b4f7-kube-api-access-lwcqm\") pod \"cinder-api-0\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:20 crc kubenswrapper[4818]: I0930 17:25:20.406677 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:21 crc kubenswrapper[4818]: I0930 17:25:21.103328 4818 scope.go:117] "RemoveContainer" containerID="635c3fc1e2779aa6aa45c7556739ca5ec0d426229819dea5f6d025cdd090485b" Sep 30 17:25:21 crc kubenswrapper[4818]: I0930 17:25:21.413391 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:21 crc kubenswrapper[4818]: I0930 17:25:21.791592 4818 scope.go:117] "RemoveContainer" containerID="4d203e46fbdb2153559f8cdaeba6487e1fd4d52f47092b8d4d11500e2bf68d02" Sep 30 17:25:21 crc kubenswrapper[4818]: I0930 17:25:21.817613 4818 scope.go:117] "RemoveContainer" containerID="1796ea9b7e073f5c8595a00dbe95c455d8e2979867dc479c50c0bfc3fc75112d" Sep 30 17:25:22 crc kubenswrapper[4818]: I0930 17:25:22.032043 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08320b49-7a37-4b7f-9f70-00deedea776b" path="/var/lib/kubelet/pods/08320b49-7a37-4b7f-9f70-00deedea776b/volumes" Sep 30 17:25:22 crc kubenswrapper[4818]: I0930 17:25:22.284538 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:22 crc kubenswrapper[4818]: W0930 17:25:22.307104 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3351d4d0_c886_4477_bc07_427cc064b4f7.slice/crio-1a0d0ed4fb7e4d5f1a6eceb5cf495fa35f4e4581642f66069418d0fccf053eee WatchSource:0}: Error finding container 1a0d0ed4fb7e4d5f1a6eceb5cf495fa35f4e4581642f66069418d0fccf053eee: Status 404 returned error can't find the container with id 1a0d0ed4fb7e4d5f1a6eceb5cf495fa35f4e4581642f66069418d0fccf053eee Sep 30 17:25:22 crc kubenswrapper[4818]: I0930 17:25:22.679577 4818 log.go:25] "Finished 
Sep 30 17:25:22 crc kubenswrapper[4818]: I0930 17:25:22.679577 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log"
Sep 30 17:25:23 crc kubenswrapper[4818]: I0930 17:25:23.026172 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"3351d4d0-c886-4477-bc07-427cc064b4f7","Type":"ContainerStarted","Data":"54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8"}
Sep 30 17:25:23 crc kubenswrapper[4818]: I0930 17:25:23.026210 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"3351d4d0-c886-4477-bc07-427cc064b4f7","Type":"ContainerStarted","Data":"1a0d0ed4fb7e4d5f1a6eceb5cf495fa35f4e4581642f66069418d0fccf053eee"}
Sep 30 17:25:23 crc kubenswrapper[4818]: I0930 17:25:23.922446 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log"
Sep 30 17:25:24 crc kubenswrapper[4818]: I0930 17:25:24.041497 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"3351d4d0-c886-4477-bc07-427cc064b4f7","Type":"ContainerStarted","Data":"1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd"}
Sep 30 17:25:24 crc kubenswrapper[4818]: I0930 17:25:24.041840 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/cinder-api-0"
Sep 30 17:25:24 crc kubenswrapper[4818]: I0930 17:25:24.071428 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-api-0" podStartSLOduration=4.0714117 podStartE2EDuration="4.0714117s" podCreationTimestamp="2025-09-30 17:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:25:24.069105098 +0000 UTC m=+1570.823376914" watchObservedRunningTime="2025-09-30 17:25:24.0714117 +0000 UTC m=+1570.825683516"
Sep 30 17:25:24 crc kubenswrapper[4818]: I0930 17:25:24.751791 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-scheduler-0"
Sep 30 17:25:24 crc kubenswrapper[4818]: I0930 17:25:24.826911 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-backup-0"
Sep 30 17:25:24 crc kubenswrapper[4818]: I0930 17:25:24.828752 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"]
Sep 30 17:25:24 crc kubenswrapper[4818]: I0930 17:25:24.904086 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"]
Sep 30 17:25:25 crc kubenswrapper[4818]: I0930 17:25:25.048945 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="cinder-backup" containerID="cri-o://435f53bf1e26bee2c441fa72f527f629c0a0db104d2704c7aa5e4b94dc0eae79" gracePeriod=30
Sep 30 17:25:25 crc kubenswrapper[4818]: I0930 17:25:25.049036 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="probe" containerID="cri-o://173eff3f7daf59ce0729e3611a0f1be325ccbc158e1d27179a61713c3f4e62c0" gracePeriod=30
Sep 30 17:25:25 crc kubenswrapper[4818]: I0930 17:25:25.049378 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="cinder-scheduler" containerID="cri-o://860e94d6cb2536e91fde4fccb55c68ffe28066b16960b0522cb67b16d55c323b" gracePeriod=30
Sep 30 17:25:25 crc kubenswrapper[4818]: I0930 17:25:25.049447 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="probe" containerID="cri-o://a64e063371f1aa14bb3a444dfca61048359dc1aa94cfb719ab3fb75ef1ff3ccd" gracePeriod=30
Sep 30 17:25:25 crc kubenswrapper[4818]: I0930 17:25:25.114181 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log"
Sep 30 17:25:26 crc kubenswrapper[4818]: I0930 17:25:26.061135 4818 generic.go:334] "Generic (PLEG): container finished" podID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerID="173eff3f7daf59ce0729e3611a0f1be325ccbc158e1d27179a61713c3f4e62c0" exitCode=0
Sep 30 17:25:26 crc kubenswrapper[4818]: I0930 17:25:26.061340 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"c294afe4-ca9a-4d7a-8069-a46a2cceab60","Type":"ContainerDied","Data":"173eff3f7daf59ce0729e3611a0f1be325ccbc158e1d27179a61713c3f4e62c0"}
Sep 30 17:25:26 crc kubenswrapper[4818]: I0930 17:25:26.063482 4818 generic.go:334] "Generic (PLEG): container finished" podID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerID="a64e063371f1aa14bb3a444dfca61048359dc1aa94cfb719ab3fb75ef1ff3ccd" exitCode=0
Sep 30 17:25:26 crc kubenswrapper[4818]: I0930 17:25:26.063519 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"d72ee158-f9de-476e-8706-d8197d5bc5d8","Type":"ContainerDied","Data":"a64e063371f1aa14bb3a444dfca61048359dc1aa94cfb719ab3fb75ef1ff3ccd"}
Sep 30 17:25:26 crc kubenswrapper[4818]: I0930 17:25:26.310824 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log"
Sep 30 17:25:26 crc kubenswrapper[4818]: I0930 17:25:26.369069 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:25:26 crc kubenswrapper[4818]: I0930 17:25:26.369541 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="f6778e25-7694-4fc9-9f75-b28e21e39099" containerName="watcher-decision-engine" containerID="cri-o://0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee" gracePeriod=30
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.020471 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233"
Sep 30 17:25:27 crc kubenswrapper[4818]: E0930 17:25:27.020755 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b"
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.116315 4818 generic.go:334] "Generic (PLEG): container finished" podID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerID="860e94d6cb2536e91fde4fccb55c68ffe28066b16960b0522cb67b16d55c323b" exitCode=0
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.116678 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"d72ee158-f9de-476e-8706-d8197d5bc5d8","Type":"ContainerDied","Data":"860e94d6cb2536e91fde4fccb55c68ffe28066b16960b0522cb67b16d55c323b"}
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.119206 4818 generic.go:334] "Generic (PLEG): container finished" podID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerID="435f53bf1e26bee2c441fa72f527f629c0a0db104d2704c7aa5e4b94dc0eae79" exitCode=0
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.119247 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"c294afe4-ca9a-4d7a-8069-a46a2cceab60","Type":"ContainerDied","Data":"435f53bf1e26bee2c441fa72f527f629c0a0db104d2704c7aa5e4b94dc0eae79"}
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.361178 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.361463 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="sg-core" containerID="cri-o://5bdd18722c833531c7421f248f5aa6befa10ec60906dc839d1dd31024103f5d5" gracePeriod=30
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.361541 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-notification-agent" containerID="cri-o://b76379470dffe183f2b18fa05822eb74ab83756077433ba1cbbee3c8e6ea118d" gracePeriod=30
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.361584 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-central-agent" containerID="cri-o://f9d269a88531a325af77e204a349f1a4e83f80539d65177d99d2449ac1fc6562" gracePeriod=30
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.361458 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="proxy-httpd" containerID="cri-o://f1bebd7390532617ddf99abd6c925fe5a652476aa71ad38bb78a3ad9358204ac" gracePeriod=30
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.427367 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0"
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.502271 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0"
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.525436 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log"
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600457 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-dev\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600517 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-iscsi\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600543 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data\") pod \"d72ee158-f9de-476e-8706-d8197d5bc5d8\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600543 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-dev" (OuterVolumeSpecName: "dev") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600562 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-combined-ca-bundle\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600633 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-brick\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600643 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600684 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600709 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-run\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600757 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data-custom\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600752 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600796 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d72ee158-f9de-476e-8706-d8197d5bc5d8-etc-machine-id\") pod \"d72ee158-f9de-476e-8706-d8197d5bc5d8\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600811 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-run" (OuterVolumeSpecName: "run") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600835 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-nvme\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600870 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zr2px\" (UniqueName: \"kubernetes.io/projected/c294afe4-ca9a-4d7a-8069-a46a2cceab60-kube-api-access-zr2px\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") "
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600889 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-cinder\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600912 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-cinder" (OuterVolumeSpecName: "var-locks-cinder") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "var-locks-cinder". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600956 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-sys\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.600988 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-combined-ca-bundle\") pod \"d72ee158-f9de-476e-8706-d8197d5bc5d8\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601009 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601042 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-sys" (OuterVolumeSpecName: "sys") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "sys". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601052 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-lib-cinder\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601071 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-cert-memcached-mtls\") pod \"d72ee158-f9de-476e-8706-d8197d5bc5d8\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601088 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-lib-modules\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601124 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-scripts\") pod \"d72ee158-f9de-476e-8706-d8197d5bc5d8\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601140 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m9gd\" (UniqueName: \"kubernetes.io/projected/d72ee158-f9de-476e-8706-d8197d5bc5d8-kube-api-access-9m9gd\") pod \"d72ee158-f9de-476e-8706-d8197d5bc5d8\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601173 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data-custom\") pod \"d72ee158-f9de-476e-8706-d8197d5bc5d8\" (UID: \"d72ee158-f9de-476e-8706-d8197d5bc5d8\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601203 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-cert-memcached-mtls\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601221 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-scripts\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601250 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-machine-id\") pod \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\" (UID: \"c294afe4-ca9a-4d7a-8069-a46a2cceab60\") " Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601766 4818 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-dev\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601783 4818 
reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-iscsi\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601792 4818 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-brick\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601800 4818 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-run\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601808 4818 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d72ee158-f9de-476e-8706-d8197d5bc5d8-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601816 4818 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-nvme\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601843 4818 reconciler_common.go:293] "Volume detached for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-locks-cinder\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601851 4818 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-sys\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.601878 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.605789 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.605859 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-lib-cinder" (OuterVolumeSpecName: "var-lib-cinder") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "var-lib-cinder". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.607195 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-scripts" (OuterVolumeSpecName: "scripts") pod "d72ee158-f9de-476e-8706-d8197d5bc5d8" (UID: "d72ee158-f9de-476e-8706-d8197d5bc5d8"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.611062 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-scripts" (OuterVolumeSpecName: "scripts") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.612330 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.617077 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c294afe4-ca9a-4d7a-8069-a46a2cceab60-kube-api-access-zr2px" (OuterVolumeSpecName: "kube-api-access-zr2px") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "kube-api-access-zr2px". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.617087 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d72ee158-f9de-476e-8706-d8197d5bc5d8" (UID: "d72ee158-f9de-476e-8706-d8197d5bc5d8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.619048 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d72ee158-f9de-476e-8706-d8197d5bc5d8-kube-api-access-9m9gd" (OuterVolumeSpecName: "kube-api-access-9m9gd") pod "d72ee158-f9de-476e-8706-d8197d5bc5d8" (UID: "d72ee158-f9de-476e-8706-d8197d5bc5d8"). InnerVolumeSpecName "kube-api-access-9m9gd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.658366 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.688543 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d72ee158-f9de-476e-8706-d8197d5bc5d8" (UID: "d72ee158-f9de-476e-8706-d8197d5bc5d8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.702963 4818 reconciler_common.go:293] "Volume detached for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-var-lib-cinder\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703204 4818 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-lib-modules\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703260 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703318 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m9gd\" (UniqueName: \"kubernetes.io/projected/d72ee158-f9de-476e-8706-d8197d5bc5d8-kube-api-access-9m9gd\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703374 4818 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703424 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703481 4818 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c294afe4-ca9a-4d7a-8069-a46a2cceab60-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703540 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703597 4818 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703647 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zr2px\" (UniqueName: \"kubernetes.io/projected/c294afe4-ca9a-4d7a-8069-a46a2cceab60-kube-api-access-zr2px\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.703697 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.706042 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data" (OuterVolumeSpecName: "config-data") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.706972 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data" (OuterVolumeSpecName: "config-data") pod "d72ee158-f9de-476e-8706-d8197d5bc5d8" (UID: "d72ee158-f9de-476e-8706-d8197d5bc5d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.749710 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "c294afe4-ca9a-4d7a-8069-a46a2cceab60" (UID: "c294afe4-ca9a-4d7a-8069-a46a2cceab60"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.765062 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "d72ee158-f9de-476e-8706-d8197d5bc5d8" (UID: "d72ee158-f9de-476e-8706-d8197d5bc5d8"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.805216 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.805259 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.805270 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72ee158-f9de-476e-8706-d8197d5bc5d8-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:27 crc kubenswrapper[4818]: I0930 17:25:27.805278 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c294afe4-ca9a-4d7a-8069-a46a2cceab60-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.129605 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"c294afe4-ca9a-4d7a-8069-a46a2cceab60","Type":"ContainerDied","Data":"e9156e51e06061c665bb4848e3915fdbaa5119ba630deadadc2ab9506d72f8a5"} Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.129655 4818 scope.go:117] "RemoveContainer" containerID="173eff3f7daf59ce0729e3611a0f1be325ccbc158e1d27179a61713c3f4e62c0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.129782 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.133292 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"d72ee158-f9de-476e-8706-d8197d5bc5d8","Type":"ContainerDied","Data":"452f29ea8dd2c827058dfae2be8005d2f91d0774962158613668b2508f72cea6"} Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.133319 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.140619 4818 generic.go:334] "Generic (PLEG): container finished" podID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerID="f1bebd7390532617ddf99abd6c925fe5a652476aa71ad38bb78a3ad9358204ac" exitCode=0 Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.140841 4818 generic.go:334] "Generic (PLEG): container finished" podID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerID="5bdd18722c833531c7421f248f5aa6befa10ec60906dc839d1dd31024103f5d5" exitCode=2 Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.140956 4818 generic.go:334] "Generic (PLEG): container finished" podID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerID="f9d269a88531a325af77e204a349f1a4e83f80539d65177d99d2449ac1fc6562" exitCode=0 Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.140660 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerDied","Data":"f1bebd7390532617ddf99abd6c925fe5a652476aa71ad38bb78a3ad9358204ac"} Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.141084 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerDied","Data":"5bdd18722c833531c7421f248f5aa6befa10ec60906dc839d1dd31024103f5d5"} Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.141104 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerDied","Data":"f9d269a88531a325af77e204a349f1a4e83f80539d65177d99d2449ac1fc6562"} Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.152877 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.161740 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.162390 4818 scope.go:117] "RemoveContainer" containerID="435f53bf1e26bee2c441fa72f527f629c0a0db104d2704c7aa5e4b94dc0eae79" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.184599 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.184780 4818 scope.go:117] "RemoveContainer" containerID="a64e063371f1aa14bb3a444dfca61048359dc1aa94cfb719ab3fb75ef1ff3ccd" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.197761 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.207111 4818 scope.go:117] "RemoveContainer" containerID="860e94d6cb2536e91fde4fccb55c68ffe28066b16960b0522cb67b16d55c323b" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.208120 4818 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: E0930 17:25:28.208876 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="probe" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.208903 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="probe" Sep 30 17:25:28 crc kubenswrapper[4818]: E0930 17:25:28.208917 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="cinder-scheduler" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.208948 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="cinder-scheduler" Sep 30 17:25:28 crc kubenswrapper[4818]: E0930 17:25:28.208986 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="cinder-backup" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.208994 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="cinder-backup" Sep 30 17:25:28 crc kubenswrapper[4818]: E0930 17:25:28.209011 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="probe" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.209019 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="probe" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.209211 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="cinder-scheduler" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.209234 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" containerName="probe" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.209267 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="cinder-backup" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.209278 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" containerName="probe" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.211111 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.217630 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-backup-config-data" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.231991 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.243982 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.245365 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.250492 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.250654 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scheduler-config-data" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312320 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312370 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-scripts\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312451 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312483 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312500 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312568 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-lib-modules\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312621 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312727 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76jm9\" (UniqueName: \"kubernetes.io/projected/d51d078b-04c1-4ea7-9411-68e8e8f9f160-kube-api-access-76jm9\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " 
pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312768 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-sys\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312838 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312899 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-dev\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312946 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.312993 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.313016 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.313041 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.313107 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-run\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.414857 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 
17:25:28.415187 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-scripts\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415235 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415265 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-scripts\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415314 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415348 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415376 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415401 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-lib-modules\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415435 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415465 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x92t\" (UniqueName: \"kubernetes.io/projected/34dccb04-d413-4e39-b2c9-87bcec31e790-kube-api-access-9x92t\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415505 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: 
\"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-lib-modules\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415534 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415395 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415613 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415651 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415734 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76jm9\" (UniqueName: \"kubernetes.io/projected/d51d078b-04c1-4ea7-9411-68e8e8f9f160-kube-api-access-76jm9\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415789 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-sys\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415833 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415852 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-sys\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415936 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " 
pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.415977 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-dev\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416022 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416064 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-dev\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416080 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416124 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416138 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416179 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416284 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-run\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416336 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34dccb04-d413-4e39-b2c9-87bcec31e790-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416370 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: 
\"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416383 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416402 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-run\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.416330 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.427644 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.427774 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.428167 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-scripts\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.428435 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.428884 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.439033 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76jm9\" (UniqueName: \"kubernetes.io/projected/d51d078b-04c1-4ea7-9411-68e8e8f9f160-kube-api-access-76jm9\") pod \"cinder-backup-0\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 
17:25:28.517731 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.517846 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.517936 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34dccb04-d413-4e39-b2c9-87bcec31e790-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.517959 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.518011 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-scripts\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.518042 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.518076 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x92t\" (UniqueName: \"kubernetes.io/projected/34dccb04-d413-4e39-b2c9-87bcec31e790-kube-api-access-9x92t\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.520309 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34dccb04-d413-4e39-b2c9-87bcec31e790-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.521218 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-scripts\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.521891 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.522218 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.522341 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.526658 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.534510 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.539826 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x92t\" (UniqueName: \"kubernetes.io/projected/34dccb04-d413-4e39-b2c9-87bcec31e790-kube-api-access-9x92t\") pod \"cinder-scheduler-0\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.606818 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.706155 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.956505 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:28 crc kubenswrapper[4818]: W0930 17:25:28.968973 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34dccb04_d413_4e39_b2c9_87bcec31e790.slice/crio-e05f41bc4090d174cd0c3e8f5ad2d66648a665f60abba466360409403034068f WatchSource:0}: Error finding container e05f41bc4090d174cd0c3e8f5ad2d66648a665f60abba466360409403034068f: Status 404 returned error can't find the container with id e05f41bc4090d174cd0c3e8f5ad2d66648a665f60abba466360409403034068f Sep 30 17:25:28 crc kubenswrapper[4818]: I0930 17:25:28.995354 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:29 crc kubenswrapper[4818]: W0930 17:25:29.009958 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd51d078b_04c1_4ea7_9411_68e8e8f9f160.slice/crio-af750bdfbf7c9338c17596f2506335c2adcb0fb972093601b1bb1040008baca6 WatchSource:0}: Error finding container af750bdfbf7c9338c17596f2506335c2adcb0fb972093601b1bb1040008baca6: Status 404 returned error can't find the container with id af750bdfbf7c9338c17596f2506335c2adcb0fb972093601b1bb1040008baca6 Sep 30 17:25:29 crc kubenswrapper[4818]: I0930 17:25:29.152894 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"34dccb04-d413-4e39-b2c9-87bcec31e790","Type":"ContainerStarted","Data":"e05f41bc4090d174cd0c3e8f5ad2d66648a665f60abba466360409403034068f"} Sep 30 17:25:29 crc kubenswrapper[4818]: I0930 17:25:29.156268 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"d51d078b-04c1-4ea7-9411-68e8e8f9f160","Type":"ContainerStarted","Data":"af750bdfbf7c9338c17596f2506335c2adcb0fb972093601b1bb1040008baca6"} Sep 30 17:25:29 crc kubenswrapper[4818]: I0930 17:25:29.916332 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.032759 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c294afe4-ca9a-4d7a-8069-a46a2cceab60" path="/var/lib/kubelet/pods/c294afe4-ca9a-4d7a-8069-a46a2cceab60/volumes" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.033342 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d72ee158-f9de-476e-8706-d8197d5bc5d8" path="/var/lib/kubelet/pods/d72ee158-f9de-476e-8706-d8197d5bc5d8/volumes" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.165059 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"34dccb04-d413-4e39-b2c9-87bcec31e790","Type":"ContainerStarted","Data":"b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8"} Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.167517 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"d51d078b-04c1-4ea7-9411-68e8e8f9f160","Type":"ContainerStarted","Data":"122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738"} Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.167770 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"d51d078b-04c1-4ea7-9411-68e8e8f9f160","Type":"ContainerStarted","Data":"2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910"} Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.171810 4818 generic.go:334] "Generic (PLEG): container finished" podID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerID="b76379470dffe183f2b18fa05822eb74ab83756077433ba1cbbee3c8e6ea118d" exitCode=0 Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.171857 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerDied","Data":"b76379470dffe183f2b18fa05822eb74ab83756077433ba1cbbee3c8e6ea118d"} Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.218136 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-backup-0" podStartSLOduration=2.218115865 podStartE2EDuration="2.218115865s" podCreationTimestamp="2025-09-30 17:25:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:25:30.208470584 +0000 UTC m=+1576.962742400" watchObservedRunningTime="2025-09-30 17:25:30.218115865 +0000 UTC m=+1576.972387681" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.500081 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.662887 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f6cj\" (UniqueName: \"kubernetes.io/projected/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-kube-api-access-7f6cj\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.662975 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-log-httpd\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.663008 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-scripts\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.663056 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-sg-core-conf-yaml\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.663111 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-combined-ca-bundle\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: 
\"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.663152 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-ceilometer-tls-certs\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.663189 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-config-data\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.663232 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-run-httpd\") pod \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\" (UID: \"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f\") " Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.663953 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.668892 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.673194 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-kube-api-access-7f6cj" (OuterVolumeSpecName: "kube-api-access-7f6cj") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "kube-api-access-7f6cj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.676023 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-scripts" (OuterVolumeSpecName: "scripts") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.727355 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.735594 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.765724 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.765759 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.765767 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.765777 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.765786 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.765796 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f6cj\" (UniqueName: \"kubernetes.io/projected/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-kube-api-access-7f6cj\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.786277 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.809805 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-config-data" (OuterVolumeSpecName: "config-data") pod "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" (UID: "8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.867973 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:30 crc kubenswrapper[4818]: I0930 17:25:30.868019 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.098237 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.197077 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.197075 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f","Type":"ContainerDied","Data":"98aff47452a9135c392cc83e8f4d6c18126e138f0ae4da2b7ac87ea4e24eb08b"} Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.197745 4818 scope.go:117] "RemoveContainer" containerID="f1bebd7390532617ddf99abd6c925fe5a652476aa71ad38bb78a3ad9358204ac" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.205321 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"34dccb04-d413-4e39-b2c9-87bcec31e790","Type":"ContainerStarted","Data":"45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a"} Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.229440 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-scheduler-0" podStartSLOduration=3.229407843 podStartE2EDuration="3.229407843s" podCreationTimestamp="2025-09-30 17:25:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:25:31.224017857 +0000 UTC m=+1577.978289683" watchObservedRunningTime="2025-09-30 17:25:31.229407843 +0000 UTC m=+1577.983679659" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.236852 4818 scope.go:117] "RemoveContainer" containerID="5bdd18722c833531c7421f248f5aa6befa10ec60906dc839d1dd31024103f5d5" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.249483 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.257301 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.267430 4818 scope.go:117] "RemoveContainer" containerID="b76379470dffe183f2b18fa05822eb74ab83756077433ba1cbbee3c8e6ea118d" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.271144 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:31 crc kubenswrapper[4818]: E0930 17:25:31.271632 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-notification-agent" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.271718 4818 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-notification-agent" Sep 30 17:25:31 crc kubenswrapper[4818]: E0930 17:25:31.271795 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="sg-core" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.271867 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="sg-core" Sep 30 17:25:31 crc kubenswrapper[4818]: E0930 17:25:31.271984 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="proxy-httpd" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.272048 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="proxy-httpd" Sep 30 17:25:31 crc kubenswrapper[4818]: E0930 17:25:31.272124 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-central-agent" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.272287 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-central-agent" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.272498 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-notification-agent" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.272605 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="sg-core" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.272672 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="ceilometer-central-agent" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.272740 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" containerName="proxy-httpd" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.274778 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.279807 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.280016 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.280132 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.293153 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.299630 4818 scope.go:117] "RemoveContainer" containerID="f9d269a88531a325af77e204a349f1a4e83f80539d65177d99d2449ac1fc6562" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.376851 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.377249 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m86rj\" (UniqueName: \"kubernetes.io/projected/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-kube-api-access-m86rj\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.377276 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-config-data\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.377313 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.377335 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.377354 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-log-httpd\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.377420 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-scripts\") pod 
\"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.377450 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-run-httpd\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478511 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478557 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-log-httpd\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478636 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-scripts\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478667 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-run-httpd\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478733 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478799 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m86rj\" (UniqueName: \"kubernetes.io/projected/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-kube-api-access-m86rj\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478821 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-config-data\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.478855 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.479336 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-run-httpd\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.479398 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-log-httpd\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.494322 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-config-data\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.497336 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.497496 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-scripts\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.497722 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.498396 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.503346 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m86rj\" (UniqueName: \"kubernetes.io/projected/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-kube-api-access-m86rj\") pod \"ceilometer-0\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:31 crc kubenswrapper[4818]: I0930 17:25:31.598624 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:32 crc kubenswrapper[4818]: I0930 17:25:32.035592 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f" path="/var/lib/kubelet/pods/8e83d5a3-fda5-4a18-bbf4-ac4e6a28014f/volumes" Sep 30 17:25:32 crc kubenswrapper[4818]: I0930 17:25:32.059307 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:32 crc kubenswrapper[4818]: W0930 17:25:32.063168 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f9a9728_8755_4c14_bb7d_d8ae57219dfd.slice/crio-bd23974a838dad2bfc5bf52f356b118426051961c525fdee4ee8e63f9493d83b WatchSource:0}: Error finding container bd23974a838dad2bfc5bf52f356b118426051961c525fdee4ee8e63f9493d83b: Status 404 returned error can't find the container with id bd23974a838dad2bfc5bf52f356b118426051961c525fdee4ee8e63f9493d83b Sep 30 17:25:32 crc kubenswrapper[4818]: I0930 17:25:32.215413 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerStarted","Data":"bd23974a838dad2bfc5bf52f356b118426051961c525fdee4ee8e63f9493d83b"} Sep 30 17:25:32 crc kubenswrapper[4818]: I0930 17:25:32.300104 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:32 crc kubenswrapper[4818]: I0930 17:25:32.300443 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:33 crc kubenswrapper[4818]: I0930 17:25:33.226368 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerStarted","Data":"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c"} Sep 30 17:25:33 crc kubenswrapper[4818]: I0930 17:25:33.481547 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_f6778e25-7694-4fc9-9f75-b28e21e39099/watcher-decision-engine/0.log" Sep 30 17:25:33 crc kubenswrapper[4818]: I0930 17:25:33.534661 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:33 crc kubenswrapper[4818]: I0930 17:25:33.607639 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.075490 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.222636 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-cert-memcached-mtls\") pod \"f6778e25-7694-4fc9-9f75-b28e21e39099\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.223031 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-config-data\") pod \"f6778e25-7694-4fc9-9f75-b28e21e39099\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.223102 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-custom-prometheus-ca\") pod \"f6778e25-7694-4fc9-9f75-b28e21e39099\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.223187 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5mn2\" (UniqueName: \"kubernetes.io/projected/f6778e25-7694-4fc9-9f75-b28e21e39099-kube-api-access-f5mn2\") pod \"f6778e25-7694-4fc9-9f75-b28e21e39099\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.223213 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-combined-ca-bundle\") pod \"f6778e25-7694-4fc9-9f75-b28e21e39099\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.223235 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6778e25-7694-4fc9-9f75-b28e21e39099-logs\") pod \"f6778e25-7694-4fc9-9f75-b28e21e39099\" (UID: \"f6778e25-7694-4fc9-9f75-b28e21e39099\") " Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.223894 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6778e25-7694-4fc9-9f75-b28e21e39099-logs" (OuterVolumeSpecName: "logs") pod "f6778e25-7694-4fc9-9f75-b28e21e39099" (UID: "f6778e25-7694-4fc9-9f75-b28e21e39099"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.230047 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6778e25-7694-4fc9-9f75-b28e21e39099-kube-api-access-f5mn2" (OuterVolumeSpecName: "kube-api-access-f5mn2") pod "f6778e25-7694-4fc9-9f75-b28e21e39099" (UID: "f6778e25-7694-4fc9-9f75-b28e21e39099"). InnerVolumeSpecName "kube-api-access-f5mn2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.253382 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerStarted","Data":"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da"} Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.253431 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerStarted","Data":"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c"} Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.255253 4818 generic.go:334] "Generic (PLEG): container finished" podID="f6778e25-7694-4fc9-9f75-b28e21e39099" containerID="0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee" exitCode=0 Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.255282 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"f6778e25-7694-4fc9-9f75-b28e21e39099","Type":"ContainerDied","Data":"0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee"} Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.255303 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"f6778e25-7694-4fc9-9f75-b28e21e39099","Type":"ContainerDied","Data":"36cc98f21ab9bdb6e73ebfbe3db64a2833131a2ca172af033cc059fe426cc611"} Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.255318 4818 scope.go:117] "RemoveContainer" containerID="0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.255451 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.256140 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "f6778e25-7694-4fc9-9f75-b28e21e39099" (UID: "f6778e25-7694-4fc9-9f75-b28e21e39099"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.256646 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6778e25-7694-4fc9-9f75-b28e21e39099" (UID: "f6778e25-7694-4fc9-9f75-b28e21e39099"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.272650 4818 scope.go:117] "RemoveContainer" containerID="0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee" Sep 30 17:25:34 crc kubenswrapper[4818]: E0930 17:25:34.273163 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee\": container with ID starting with 0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee not found: ID does not exist" containerID="0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.273219 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee"} err="failed to get container status \"0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee\": rpc error: code = NotFound desc = could not find container \"0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee\": container with ID starting with 0a348a50d34a23fd31682ffc32c6c847704627ae1012a3e4d9078f9f82e93aee not found: ID does not exist" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.305225 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-config-data" (OuterVolumeSpecName: "config-data") pod "f6778e25-7694-4fc9-9f75-b28e21e39099" (UID: "f6778e25-7694-4fc9-9f75-b28e21e39099"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.318131 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "f6778e25-7694-4fc9-9f75-b28e21e39099" (UID: "f6778e25-7694-4fc9-9f75-b28e21e39099"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.325011 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.325040 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.325049 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.325058 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5mn2\" (UniqueName: \"kubernetes.io/projected/f6778e25-7694-4fc9-9f75-b28e21e39099-kube-api-access-f5mn2\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.325066 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6778e25-7694-4fc9-9f75-b28e21e39099-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.325076 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6778e25-7694-4fc9-9f75-b28e21e39099-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.589020 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.601322 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.636726 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:34 crc kubenswrapper[4818]: E0930 17:25:34.637227 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6778e25-7694-4fc9-9f75-b28e21e39099" containerName="watcher-decision-engine" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.637257 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6778e25-7694-4fc9-9f75-b28e21e39099" containerName="watcher-decision-engine" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.637528 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6778e25-7694-4fc9-9f75-b28e21e39099" containerName="watcher-decision-engine" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.638417 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.642340 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.652999 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.732587 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7488\" (UniqueName: \"kubernetes.io/projected/d08329e8-65f4-466c-aa9a-e1f488b8446e-kube-api-access-c7488\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.732764 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.732882 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.732919 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d08329e8-65f4-466c-aa9a-e1f488b8446e-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.732962 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.733005 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.834493 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.835792 4818 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.836281 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d08329e8-65f4-466c-aa9a-e1f488b8446e-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.836425 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.836571 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.836751 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7488\" (UniqueName: \"kubernetes.io/projected/d08329e8-65f4-466c-aa9a-e1f488b8446e-kube-api-access-c7488\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.837217 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d08329e8-65f4-466c-aa9a-e1f488b8446e-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.839911 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.841603 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.842136 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc 
kubenswrapper[4818]: I0930 17:25:34.854847 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.862022 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7488\" (UniqueName: \"kubernetes.io/projected/d08329e8-65f4-466c-aa9a-e1f488b8446e-kube-api-access-c7488\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:34 crc kubenswrapper[4818]: I0930 17:25:34.968809 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:35 crc kubenswrapper[4818]: I0930 17:25:35.460435 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:35 crc kubenswrapper[4818]: W0930 17:25:35.462651 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd08329e8_65f4_466c_aa9a_e1f488b8446e.slice/crio-d550e6db07b12f6af01906bcde3cc7ad8cd2b747983a3fd92fa25b812c29bea8 WatchSource:0}: Error finding container d550e6db07b12f6af01906bcde3cc7ad8cd2b747983a3fd92fa25b812c29bea8: Status 404 returned error can't find the container with id d550e6db07b12f6af01906bcde3cc7ad8cd2b747983a3fd92fa25b812c29bea8 Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.032677 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6778e25-7694-4fc9-9f75-b28e21e39099" path="/var/lib/kubelet/pods/f6778e25-7694-4fc9-9f75-b28e21e39099/volumes" Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.279852 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerStarted","Data":"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47"} Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.280251 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.281063 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"d08329e8-65f4-466c-aa9a-e1f488b8446e","Type":"ContainerStarted","Data":"ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020"} Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.281104 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"d08329e8-65f4-466c-aa9a-e1f488b8446e","Type":"ContainerStarted","Data":"d550e6db07b12f6af01906bcde3cc7ad8cd2b747983a3fd92fa25b812c29bea8"} Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.300750 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.9205707429999999 podStartE2EDuration="5.300733636s" podCreationTimestamp="2025-09-30 17:25:31 +0000 UTC" firstStartedPulling="2025-09-30 17:25:32.065164572 +0000 UTC m=+1578.819436388" lastFinishedPulling="2025-09-30 17:25:35.445327445 +0000 UTC 
m=+1582.199599281" observedRunningTime="2025-09-30 17:25:36.297373835 +0000 UTC m=+1583.051645661" watchObservedRunningTime="2025-09-30 17:25:36.300733636 +0000 UTC m=+1583.055005452" Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.318052 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.318036164 podStartE2EDuration="2.318036164s" podCreationTimestamp="2025-09-30 17:25:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:25:36.316693628 +0000 UTC m=+1583.070965444" watchObservedRunningTime="2025-09-30 17:25:36.318036164 +0000 UTC m=+1583.072307980" Sep 30 17:25:36 crc kubenswrapper[4818]: I0930 17:25:36.988071 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:38 crc kubenswrapper[4818]: I0930 17:25:38.185018 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:38 crc kubenswrapper[4818]: I0930 17:25:38.756230 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:38 crc kubenswrapper[4818]: I0930 17:25:38.860250 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:39 crc kubenswrapper[4818]: I0930 17:25:39.368238 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:40 crc kubenswrapper[4818]: I0930 17:25:40.583381 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:41 crc kubenswrapper[4818]: I0930 17:25:41.795779 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:42 crc kubenswrapper[4818]: I0930 17:25:42.020607 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:25:42 crc kubenswrapper[4818]: E0930 17:25:42.021108 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:25:43 crc kubenswrapper[4818]: I0930 17:25:42.999850 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:44 crc kubenswrapper[4818]: I0930 17:25:44.200328 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 
30 17:25:44 crc kubenswrapper[4818]: I0930 17:25:44.970427 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:45 crc kubenswrapper[4818]: I0930 17:25:45.010066 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:45 crc kubenswrapper[4818]: I0930 17:25:45.365707 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:45 crc kubenswrapper[4818]: I0930 17:25:45.420993 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:45 crc kubenswrapper[4818]: I0930 17:25:45.494557 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:46 crc kubenswrapper[4818]: I0930 17:25:46.652817 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:46 crc kubenswrapper[4818]: I0930 17:25:46.885275 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:46 crc kubenswrapper[4818]: I0930 17:25:46.948043 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-fns5p"] Sep 30 17:25:46 crc kubenswrapper[4818]: I0930 17:25:46.957506 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-fns5p"] Sep 30 17:25:46 crc kubenswrapper[4818]: I0930 17:25:46.994691 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:46 crc kubenswrapper[4818]: I0930 17:25:46.994962 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="cinder-backup" containerID="cri-o://2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910" gracePeriod=30 Sep 30 17:25:46 crc kubenswrapper[4818]: I0930 17:25:46.995296 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="probe" containerID="cri-o://122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738" gracePeriod=30 Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.048834 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.049151 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="cinder-scheduler" containerID="cri-o://b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8" gracePeriod=30 Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.049579 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="probe" 
containerID="cri-o://45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a" gracePeriod=30 Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.059267 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.059539 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api-log" containerID="cri-o://54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8" gracePeriod=30 Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.060024 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api" containerID="cri-o://1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd" gracePeriod=30 Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.082547 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder415e-account-delete-6s7pn"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.084195 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.096499 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder415e-account-delete-6s7pn"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.114573 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-db-create-44kbl"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.129504 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-db-create-44kbl"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.159169 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder415e-account-delete-6s7pn"] Sep 30 17:25:47 crc kubenswrapper[4818]: E0930 17:25:47.159943 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-ptlxf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" podUID="66af5cdb-f664-4931-b80d-2df1f4e327bc" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.163452 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-415e-account-create-4h58z"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.166047 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptlxf\" (UniqueName: \"kubernetes.io/projected/66af5cdb-f664-4931-b80d-2df1f4e327bc-kube-api-access-ptlxf\") pod \"cinder415e-account-delete-6s7pn\" (UID: \"66af5cdb-f664-4931-b80d-2df1f4e327bc\") " pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.171994 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-415e-account-create-4h58z"] Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.267765 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptlxf\" (UniqueName: \"kubernetes.io/projected/66af5cdb-f664-4931-b80d-2df1f4e327bc-kube-api-access-ptlxf\") pod \"cinder415e-account-delete-6s7pn\" (UID: \"66af5cdb-f664-4931-b80d-2df1f4e327bc\") " 
pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.292448 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptlxf\" (UniqueName: \"kubernetes.io/projected/66af5cdb-f664-4931-b80d-2df1f4e327bc-kube-api-access-ptlxf\") pod \"cinder415e-account-delete-6s7pn\" (UID: \"66af5cdb-f664-4931-b80d-2df1f4e327bc\") " pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.384288 4818 generic.go:334] "Generic (PLEG): container finished" podID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerID="54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8" exitCode=143 Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.384398 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.384393 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"3351d4d0-c886-4477-bc07-427cc064b4f7","Type":"ContainerDied","Data":"54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8"} Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.394378 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.470980 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptlxf\" (UniqueName: \"kubernetes.io/projected/66af5cdb-f664-4931-b80d-2df1f4e327bc-kube-api-access-ptlxf\") pod \"66af5cdb-f664-4931-b80d-2df1f4e327bc\" (UID: \"66af5cdb-f664-4931-b80d-2df1f4e327bc\") " Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.477155 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66af5cdb-f664-4931-b80d-2df1f4e327bc-kube-api-access-ptlxf" (OuterVolumeSpecName: "kube-api-access-ptlxf") pod "66af5cdb-f664-4931-b80d-2df1f4e327bc" (UID: "66af5cdb-f664-4931-b80d-2df1f4e327bc"). InnerVolumeSpecName "kube-api-access-ptlxf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:47 crc kubenswrapper[4818]: I0930 17:25:47.572996 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptlxf\" (UniqueName: \"kubernetes.io/projected/66af5cdb-f664-4931-b80d-2df1f4e327bc-kube-api-access-ptlxf\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.030359 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50baa69b-1cfe-4b25-8186-5a21b7d9c889" path="/var/lib/kubelet/pods/50baa69b-1cfe-4b25-8186-5a21b7d9c889/volumes" Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.030858 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b688e71-d6a3-44cc-a380-223370ad26ad" path="/var/lib/kubelet/pods/7b688e71-d6a3-44cc-a380-223370ad26ad/volumes" Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.031409 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe2377e1-d738-46b3-8f4c-d22503a2a648" path="/var/lib/kubelet/pods/fe2377e1-d738-46b3-8f4c-d22503a2a648/volumes" Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.092986 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.396020 4818 generic.go:334] "Generic (PLEG): container finished" podID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerID="122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738" exitCode=0 Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.396143 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"d51d078b-04c1-4ea7-9411-68e8e8f9f160","Type":"ContainerDied","Data":"122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738"} Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.399166 4818 generic.go:334] "Generic (PLEG): container finished" podID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerID="45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a" exitCode=0 Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.399233 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"34dccb04-d413-4e39-b2c9-87bcec31e790","Type":"ContainerDied","Data":"45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a"} Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.399269 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder415e-account-delete-6s7pn" Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.447992 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder415e-account-delete-6s7pn"] Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.458109 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder415e-account-delete-6s7pn"] Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.619568 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:48 crc kubenswrapper[4818]: I0930 17:25:48.619810 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="d08329e8-65f4-466c-aa9a-e1f488b8446e" containerName="watcher-decision-engine" containerID="cri-o://ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020" gracePeriod=30 Sep 30 17:25:49 crc kubenswrapper[4818]: I0930 17:25:49.330341 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:49 crc kubenswrapper[4818]: I0930 17:25:49.343085 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:49 crc kubenswrapper[4818]: I0930 17:25:49.343336 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-central-agent" containerID="cri-o://9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" gracePeriod=30 Sep 30 17:25:49 crc kubenswrapper[4818]: I0930 17:25:49.343450 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-notification-agent" containerID="cri-o://b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" gracePeriod=30 Sep 30 17:25:49 crc kubenswrapper[4818]: I0930 17:25:49.343451 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="sg-core" containerID="cri-o://b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" gracePeriod=30 Sep 30 17:25:49 crc kubenswrapper[4818]: I0930 17:25:49.343451 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="proxy-httpd" containerID="cri-o://735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" gracePeriod=30 Sep 30 17:25:49 crc kubenswrapper[4818]: I0930 17:25:49.348553 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.197:3000/\": EOF" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.035505 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66af5cdb-f664-4931-b80d-2df1f4e327bc" path="/var/lib/kubelet/pods/66af5cdb-f664-4931-b80d-2df1f4e327bc/volumes" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.155730 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.220246 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-combined-ca-bundle\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.220505 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-sg-core-conf-yaml\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.220620 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-ceilometer-tls-certs\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.220738 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m86rj\" (UniqueName: \"kubernetes.io/projected/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-kube-api-access-m86rj\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.220836 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-scripts\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.220930 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-run-httpd\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.221319 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-log-httpd\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.221414 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-config-data\") pod \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\" (UID: \"0f9a9728-8755-4c14-bb7d-d8ae57219dfd\") " Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.221275 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.224189 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.227101 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-scripts" (OuterVolumeSpecName: "scripts") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.228190 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-kube-api-access-m86rj" (OuterVolumeSpecName: "kube-api-access-m86rj") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "kube-api-access-m86rj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.264214 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.292941 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.323269 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.323297 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m86rj\" (UniqueName: \"kubernetes.io/projected/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-kube-api-access-m86rj\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.323307 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.323315 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.323325 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.323334 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.326194 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.334709 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-config-data" (OuterVolumeSpecName: "config-data") pod "0f9a9728-8755-4c14-bb7d-d8ae57219dfd" (UID: "0f9a9728-8755-4c14-bb7d-d8ae57219dfd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.423257 4818 generic.go:334] "Generic (PLEG): container finished" podID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerID="735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" exitCode=0 Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.423493 4818 generic.go:334] "Generic (PLEG): container finished" podID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerID="b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" exitCode=2 Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.423551 4818 generic.go:334] "Generic (PLEG): container finished" podID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerID="b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" exitCode=0 Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.423600 4818 generic.go:334] "Generic (PLEG): container finished" podID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerID="9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" exitCode=0 Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.423374 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.423360 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerDied","Data":"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47"} Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.424313 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerDied","Data":"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da"} Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.424340 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerDied","Data":"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c"} Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.424352 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerDied","Data":"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c"} Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.424366 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0f9a9728-8755-4c14-bb7d-d8ae57219dfd","Type":"ContainerDied","Data":"bd23974a838dad2bfc5bf52f356b118426051961c525fdee4ee8e63f9493d83b"} Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.424385 4818 scope.go:117] "RemoveContainer" containerID="735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.425202 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.425233 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f9a9728-8755-4c14-bb7d-d8ae57219dfd-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:50 crc kubenswrapper[4818]: 
I0930 17:25:50.463065 4818 scope.go:117] "RemoveContainer" containerID="b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.470144 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.491439 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.502996 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.503385 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-notification-agent" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503405 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-notification-agent" Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.503424 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-central-agent" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503451 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-central-agent" Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.503471 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="sg-core" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503480 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="sg-core" Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.503503 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="proxy-httpd" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503511 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="proxy-httpd" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503722 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-notification-agent" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503747 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="ceilometer-central-agent" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503757 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="sg-core" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.503781 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" containerName="proxy-httpd" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.506441 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.507582 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.510418 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.510604 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.510754 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.521892 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.526017 4818 scope.go:117] "RemoveContainer" containerID="b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.527033 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/cinder-api-0" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.194:8776/healthcheck\": read tcp 10.217.0.2:36080->10.217.0.194:8776: read: connection reset by peer" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.561807 4818 scope.go:117] "RemoveContainer" containerID="9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.628659 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.628777 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-log-httpd\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.628834 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.628873 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.628990 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lh85\" (UniqueName: 
\"kubernetes.io/projected/6e51b830-4d45-4b0f-8773-40e46ff074ee-kube-api-access-7lh85\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.629022 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-scripts\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.629086 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-config-data\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.629239 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-run-httpd\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.650617 4818 scope.go:117] "RemoveContainer" containerID="735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.651420 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": container with ID starting with 735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47 not found: ID does not exist" containerID="735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.651445 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47"} err="failed to get container status \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": rpc error: code = NotFound desc = could not find container \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": container with ID starting with 735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47 not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.651465 4818 scope.go:117] "RemoveContainer" containerID="b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.651734 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": container with ID starting with b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da not found: ID does not exist" containerID="b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.651764 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da"} err="failed to get container status \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": rpc 
error: code = NotFound desc = could not find container \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": container with ID starting with b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.651777 4818 scope.go:117] "RemoveContainer" containerID="b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.652179 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": container with ID starting with b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c not found: ID does not exist" containerID="b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.652197 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c"} err="failed to get container status \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": rpc error: code = NotFound desc = could not find container \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": container with ID starting with b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.652210 4818 scope.go:117] "RemoveContainer" containerID="9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" Sep 30 17:25:50 crc kubenswrapper[4818]: E0930 17:25:50.652532 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": container with ID starting with 9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c not found: ID does not exist" containerID="9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.652549 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c"} err="failed to get container status \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": rpc error: code = NotFound desc = could not find container \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": container with ID starting with 9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.652561 4818 scope.go:117] "RemoveContainer" containerID="735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.654334 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47"} err="failed to get container status \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": rpc error: code = NotFound desc = could not find container \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": container with ID starting with 735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47 not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 
17:25:50.654352 4818 scope.go:117] "RemoveContainer" containerID="b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.655574 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da"} err="failed to get container status \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": rpc error: code = NotFound desc = could not find container \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": container with ID starting with b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.656358 4818 scope.go:117] "RemoveContainer" containerID="b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.658845 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c"} err="failed to get container status \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": rpc error: code = NotFound desc = could not find container \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": container with ID starting with b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.658897 4818 scope.go:117] "RemoveContainer" containerID="9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.659292 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c"} err="failed to get container status \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": rpc error: code = NotFound desc = could not find container \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": container with ID starting with 9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.659316 4818 scope.go:117] "RemoveContainer" containerID="735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.659584 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47"} err="failed to get container status \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": rpc error: code = NotFound desc = could not find container \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": container with ID starting with 735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47 not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.659600 4818 scope.go:117] "RemoveContainer" containerID="b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.660507 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da"} err="failed to get container status 
\"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": rpc error: code = NotFound desc = could not find container \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": container with ID starting with b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.660524 4818 scope.go:117] "RemoveContainer" containerID="b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.661076 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c"} err="failed to get container status \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": rpc error: code = NotFound desc = could not find container \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": container with ID starting with b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.661105 4818 scope.go:117] "RemoveContainer" containerID="9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.661402 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c"} err="failed to get container status \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": rpc error: code = NotFound desc = could not find container \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": container with ID starting with 9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.661454 4818 scope.go:117] "RemoveContainer" containerID="735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.661771 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47"} err="failed to get container status \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": rpc error: code = NotFound desc = could not find container \"735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47\": container with ID starting with 735f7a9a7720731797479a8905f2d351af565bd3de65e1383db59e4cf21dcb47 not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.661795 4818 scope.go:117] "RemoveContainer" containerID="b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.662217 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da"} err="failed to get container status \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": rpc error: code = NotFound desc = could not find container \"b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da\": container with ID starting with b41ec8a616ea3dcfd943ced5a8960b56b0d043f1802fe405e75c8d88533ae5da not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.662237 4818 scope.go:117] "RemoveContainer" 
containerID="b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.662613 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c"} err="failed to get container status \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": rpc error: code = NotFound desc = could not find container \"b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c\": container with ID starting with b12a38e64937462bad56c18d3e620cd2778d49fbaba420dfecc6271b50c54e0c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.662632 4818 scope.go:117] "RemoveContainer" containerID="9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.663116 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c"} err="failed to get container status \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": rpc error: code = NotFound desc = could not find container \"9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c\": container with ID starting with 9a8dce7f6596081cfde560903d82da017d55a17314ece0f47634038cea2dad6c not found: ID does not exist" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.731274 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.732265 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-log-httpd\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.732354 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.732432 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.732481 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lh85\" (UniqueName: \"kubernetes.io/projected/6e51b830-4d45-4b0f-8773-40e46ff074ee-kube-api-access-7lh85\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.732539 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-scripts\") pod 
\"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.732571 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-config-data\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.732625 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-run-httpd\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.733210 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-run-httpd\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.738572 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-log-httpd\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.739008 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-scripts\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.742468 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-config-data\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.749707 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.751390 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.753245 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lh85\" (UniqueName: \"kubernetes.io/projected/6e51b830-4d45-4b0f-8773-40e46ff074ee-kube-api-access-7lh85\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.768182 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.836551 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:50 crc kubenswrapper[4818]: I0930 17:25:50.909765 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037405 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-internal-tls-certs\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037469 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037499 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351d4d0-c886-4477-bc07-427cc064b4f7-logs\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037551 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-scripts\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037626 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3351d4d0-c886-4477-bc07-427cc064b4f7-etc-machine-id\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037666 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-cert-memcached-mtls\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037690 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-public-tls-certs\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037713 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data-custom\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037756 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-combined-ca-bundle\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037809 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwcqm\" (UniqueName: \"kubernetes.io/projected/3351d4d0-c886-4477-bc07-427cc064b4f7-kube-api-access-lwcqm\") pod \"3351d4d0-c886-4477-bc07-427cc064b4f7\" (UID: \"3351d4d0-c886-4477-bc07-427cc064b4f7\") " Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.037971 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3351d4d0-c886-4477-bc07-427cc064b4f7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.038204 4818 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3351d4d0-c886-4477-bc07-427cc064b4f7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.039030 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3351d4d0-c886-4477-bc07-427cc064b4f7-logs" (OuterVolumeSpecName: "logs") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.042430 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.043279 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3351d4d0-c886-4477-bc07-427cc064b4f7-kube-api-access-lwcqm" (OuterVolumeSpecName: "kube-api-access-lwcqm") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "kube-api-access-lwcqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.045614 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-scripts" (OuterVolumeSpecName: "scripts") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.062093 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.092211 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.093177 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data" (OuterVolumeSpecName: "config-data") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.099108 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.111120 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "3351d4d0-c886-4477-bc07-427cc064b4f7" (UID: "3351d4d0-c886-4477-bc07-427cc064b4f7"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.139941 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwcqm\" (UniqueName: \"kubernetes.io/projected/3351d4d0-c886-4477-bc07-427cc064b4f7-kube-api-access-lwcqm\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.139971 4818 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.139981 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.140063 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351d4d0-c886-4477-bc07-427cc064b4f7-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.140073 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.140082 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.140090 4818 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.140122 4818 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.140133 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3351d4d0-c886-4477-bc07-427cc064b4f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.318812 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:25:51 crc kubenswrapper[4818]: W0930 17:25:51.328152 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e51b830_4d45_4b0f_8773_40e46ff074ee.slice/crio-fc1f5d1428ca5b569c569cce9ff8f7903e2e6dd3500b68739c01f4cb779479d9 WatchSource:0}: Error finding container fc1f5d1428ca5b569c569cce9ff8f7903e2e6dd3500b68739c01f4cb779479d9: Status 404 returned error can't find the container with id fc1f5d1428ca5b569c569cce9ff8f7903e2e6dd3500b68739c01f4cb779479d9 Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.435793 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerStarted","Data":"fc1f5d1428ca5b569c569cce9ff8f7903e2e6dd3500b68739c01f4cb779479d9"} Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.440529 4818 generic.go:334] "Generic (PLEG): container finished" podID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerID="1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd" exitCode=0 Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.440584 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"3351d4d0-c886-4477-bc07-427cc064b4f7","Type":"ContainerDied","Data":"1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd"} Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.440604 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.440618 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"3351d4d0-c886-4477-bc07-427cc064b4f7","Type":"ContainerDied","Data":"1a0d0ed4fb7e4d5f1a6eceb5cf495fa35f4e4581642f66069418d0fccf053eee"} Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.440644 4818 scope.go:117] "RemoveContainer" containerID="1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.540408 4818 scope.go:117] "RemoveContainer" containerID="54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.545497 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.557236 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.560202 4818 scope.go:117] "RemoveContainer" containerID="1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd" Sep 30 17:25:51 crc kubenswrapper[4818]: E0930 17:25:51.560715 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd\": container with ID starting with 1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd not found: ID does not exist" containerID="1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.560772 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd"} err="failed to get container status \"1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd\": rpc error: code = NotFound desc = could not find container \"1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd\": container with ID starting with 1ac70bd903f9edf4488db80a3d44b6874efab86e3c7fd168d369f46252bd60dd not found: ID does not exist" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.560807 4818 scope.go:117] "RemoveContainer" containerID="54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8" Sep 30 17:25:51 crc kubenswrapper[4818]: E0930 17:25:51.561300 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8\": container with ID starting with 54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8 not found: ID does not exist" containerID="54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8" Sep 30 17:25:51 crc kubenswrapper[4818]: I0930 17:25:51.561329 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8"} err="failed to get container status \"54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8\": rpc error: code = NotFound desc = could not find container \"54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8\": container with ID starting with 54ad1d586bec7dee1dc0790d87a3daf75422d7d613b74dede8247d7aac7fd5b8 not found: ID does not exist" Sep 30 17:25:51 crc 
kubenswrapper[4818]: I0930 17:25:51.824015 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.018364 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.043244 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f9a9728-8755-4c14-bb7d-d8ae57219dfd" path="/var/lib/kubelet/pods/0f9a9728-8755-4c14-bb7d-d8ae57219dfd/volumes" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.044491 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" path="/var/lib/kubelet/pods/3351d4d0-c886-4477-bc07-427cc064b4f7/volumes" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055623 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data-custom\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055652 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-brick\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055668 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-cert-memcached-mtls\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055688 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055756 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76jm9\" (UniqueName: \"kubernetes.io/projected/d51d078b-04c1-4ea7-9411-68e8e8f9f160-kube-api-access-76jm9\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055776 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-dev\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055793 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-machine-id\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055832 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055862 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-lib-modules\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055890 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-scripts\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055933 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-lib-cinder\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.055992 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-nvme\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.056011 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-iscsi\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.056042 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-cinder\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.056071 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-sys\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.056092 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-run\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.057585 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.057620 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.057710 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.064242 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-cinder" (OuterVolumeSpecName: "var-locks-cinder") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "var-locks-cinder". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.064301 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-sys" (OuterVolumeSpecName: "sys") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.064320 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-run" (OuterVolumeSpecName: "run") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.064639 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-lib-cinder" (OuterVolumeSpecName: "var-lib-cinder") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "var-lib-cinder". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.064661 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-dev" (OuterVolumeSpecName: "dev") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.064734 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "etc-nvme". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.064763 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.065840 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-scripts" (OuterVolumeSpecName: "scripts") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.070266 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.074872 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d51d078b-04c1-4ea7-9411-68e8e8f9f160-kube-api-access-76jm9" (OuterVolumeSpecName: "kube-api-access-76jm9") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "kube-api-access-76jm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.157324 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.158094 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle\") pod \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\" (UID: \"d51d078b-04c1-4ea7-9411-68e8e8f9f160\") " Sep 30 17:25:52 crc kubenswrapper[4818]: W0930 17:25:52.158255 4818 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/d51d078b-04c1-4ea7-9411-68e8e8f9f160/volumes/kubernetes.io~secret/combined-ca-bundle Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.158281 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.158945 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.158966 4818 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-lib-modules\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.158986 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159000 4818 reconciler_common.go:293] "Volume detached for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-lib-cinder\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159012 4818 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-nvme\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159022 4818 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-iscsi\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159032 4818 reconciler_common.go:293] "Volume detached for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-cinder\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159043 4818 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-sys\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159053 4818 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-run\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159064 4818 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159074 4818 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-var-locks-brick\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159086 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76jm9\" (UniqueName: \"kubernetes.io/projected/d51d078b-04c1-4ea7-9411-68e8e8f9f160-kube-api-access-76jm9\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159098 4818 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-dev\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.159108 4818 reconciler_common.go:293] 
"Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d51d078b-04c1-4ea7-9411-68e8e8f9f160-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.175235 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.211982 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data" (OuterVolumeSpecName: "config-data") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.251012 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "d51d078b-04c1-4ea7-9411-68e8e8f9f160" (UID: "d51d078b-04c1-4ea7-9411-68e8e8f9f160"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.260541 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data\") pod \"34dccb04-d413-4e39-b2c9-87bcec31e790\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.260588 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-cert-memcached-mtls\") pod \"34dccb04-d413-4e39-b2c9-87bcec31e790\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.260621 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data-custom\") pod \"34dccb04-d413-4e39-b2c9-87bcec31e790\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.260648 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x92t\" (UniqueName: \"kubernetes.io/projected/34dccb04-d413-4e39-b2c9-87bcec31e790-kube-api-access-9x92t\") pod \"34dccb04-d413-4e39-b2c9-87bcec31e790\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.260713 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34dccb04-d413-4e39-b2c9-87bcec31e790-etc-machine-id\") pod \"34dccb04-d413-4e39-b2c9-87bcec31e790\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.260761 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-combined-ca-bundle\") pod \"34dccb04-d413-4e39-b2c9-87bcec31e790\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.260810 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-scripts\") pod \"34dccb04-d413-4e39-b2c9-87bcec31e790\" (UID: \"34dccb04-d413-4e39-b2c9-87bcec31e790\") " Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.261112 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.261124 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51d078b-04c1-4ea7-9411-68e8e8f9f160-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.262605 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/34dccb04-d413-4e39-b2c9-87bcec31e790-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "34dccb04-d413-4e39-b2c9-87bcec31e790" (UID: "34dccb04-d413-4e39-b2c9-87bcec31e790"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.264181 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-scripts" (OuterVolumeSpecName: "scripts") pod "34dccb04-d413-4e39-b2c9-87bcec31e790" (UID: "34dccb04-d413-4e39-b2c9-87bcec31e790"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.264507 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "34dccb04-d413-4e39-b2c9-87bcec31e790" (UID: "34dccb04-d413-4e39-b2c9-87bcec31e790"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.265642 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34dccb04-d413-4e39-b2c9-87bcec31e790-kube-api-access-9x92t" (OuterVolumeSpecName: "kube-api-access-9x92t") pod "34dccb04-d413-4e39-b2c9-87bcec31e790" (UID: "34dccb04-d413-4e39-b2c9-87bcec31e790"). InnerVolumeSpecName "kube-api-access-9x92t". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.309851 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34dccb04-d413-4e39-b2c9-87bcec31e790" (UID: "34dccb04-d413-4e39-b2c9-87bcec31e790"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.347461 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data" (OuterVolumeSpecName: "config-data") pod "34dccb04-d413-4e39-b2c9-87bcec31e790" (UID: "34dccb04-d413-4e39-b2c9-87bcec31e790"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.362199 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.362228 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.362238 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.362246 4818 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.362255 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x92t\" (UniqueName: \"kubernetes.io/projected/34dccb04-d413-4e39-b2c9-87bcec31e790-kube-api-access-9x92t\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.362264 4818 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34dccb04-d413-4e39-b2c9-87bcec31e790-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.379245 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "34dccb04-d413-4e39-b2c9-87bcec31e790" (UID: "34dccb04-d413-4e39-b2c9-87bcec31e790"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.448999 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerStarted","Data":"24e4b54bccbb43e81db28f431ce045404d6719d6930136e59a692023089df235"} Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.453705 4818 generic.go:334] "Generic (PLEG): container finished" podID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerID="b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8" exitCode=0 Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.453752 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"34dccb04-d413-4e39-b2c9-87bcec31e790","Type":"ContainerDied","Data":"b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8"} Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.453773 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"34dccb04-d413-4e39-b2c9-87bcec31e790","Type":"ContainerDied","Data":"e05f41bc4090d174cd0c3e8f5ad2d66648a665f60abba466360409403034068f"} Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.453790 4818 scope.go:117] "RemoveContainer" containerID="45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.453877 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.464452 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/34dccb04-d413-4e39-b2c9-87bcec31e790-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.466139 4818 generic.go:334] "Generic (PLEG): container finished" podID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerID="2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910" exitCode=0 Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.466178 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"d51d078b-04c1-4ea7-9411-68e8e8f9f160","Type":"ContainerDied","Data":"2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910"} Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.466204 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"d51d078b-04c1-4ea7-9411-68e8e8f9f160","Type":"ContainerDied","Data":"af750bdfbf7c9338c17596f2506335c2adcb0fb972093601b1bb1040008baca6"} Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.466256 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.552869 4818 scope.go:117] "RemoveContainer" containerID="b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.576247 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.579372 4818 scope.go:117] "RemoveContainer" containerID="45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a" Sep 30 17:25:52 crc kubenswrapper[4818]: E0930 17:25:52.579970 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a\": container with ID starting with 45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a not found: ID does not exist" containerID="45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.580007 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a"} err="failed to get container status \"45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a\": rpc error: code = NotFound desc = could not find container \"45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a\": container with ID starting with 45fabd2d546a6f347495c79461371387123bd87a09e0d152b779503edae7933a not found: ID does not exist" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.580031 4818 scope.go:117] "RemoveContainer" containerID="b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8" Sep 30 17:25:52 crc kubenswrapper[4818]: E0930 17:25:52.581288 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8\": container with ID starting with b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8 not found: ID does not exist" containerID="b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.581314 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8"} err="failed to get container status \"b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8\": rpc error: code = NotFound desc = could not find container \"b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8\": container with ID starting with b2bd0e3ab6389304c9a732921abbba0eb28f49235eee3bc29520f7122ffe8da8 not found: ID does not exist" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.581328 4818 scope.go:117] "RemoveContainer" containerID="122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.583576 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.599405 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.600777 4818 scope.go:117] "RemoveContainer" 
containerID="2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.607799 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.620911 4818 scope.go:117] "RemoveContainer" containerID="122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738" Sep 30 17:25:52 crc kubenswrapper[4818]: E0930 17:25:52.621341 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738\": container with ID starting with 122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738 not found: ID does not exist" containerID="122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.621373 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738"} err="failed to get container status \"122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738\": rpc error: code = NotFound desc = could not find container \"122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738\": container with ID starting with 122b5438074117eae406345c0aaf52c85632d2ba2737a144486e0957b93f7738 not found: ID does not exist" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.621401 4818 scope.go:117] "RemoveContainer" containerID="2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910" Sep 30 17:25:52 crc kubenswrapper[4818]: E0930 17:25:52.621619 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910\": container with ID starting with 2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910 not found: ID does not exist" containerID="2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910" Sep 30 17:25:52 crc kubenswrapper[4818]: I0930 17:25:52.621642 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910"} err="failed to get container status \"2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910\": rpc error: code = NotFound desc = could not find container \"2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910\": container with ID starting with 2da642d9159434effe1f7ba26059b49004b8f48e5b3a6fd42f4243d8b1f1f910 not found: ID does not exist" Sep 30 17:25:53 crc kubenswrapper[4818]: I0930 17:25:53.040603 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:53 crc kubenswrapper[4818]: I0930 17:25:53.481041 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerStarted","Data":"0d26a9458d54df82fbe8e5699d1c968785a312fb9d7034dc562ddabecb1f2371"} Sep 30 17:25:54 crc kubenswrapper[4818]: I0930 17:25:54.047903 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" path="/var/lib/kubelet/pods/34dccb04-d413-4e39-b2c9-87bcec31e790/volumes" Sep 30 17:25:54 crc 
kubenswrapper[4818]: I0930 17:25:54.049142 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" path="/var/lib/kubelet/pods/d51d078b-04c1-4ea7-9411-68e8e8f9f160/volumes" Sep 30 17:25:54 crc kubenswrapper[4818]: I0930 17:25:54.231759 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:54 crc kubenswrapper[4818]: I0930 17:25:54.494334 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerStarted","Data":"3fb0b9527c05edad2969524ccab413ebfcfaaa133c35f12e0586251d7457837d"} Sep 30 17:25:55 crc kubenswrapper[4818]: I0930 17:25:55.415343 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:55 crc kubenswrapper[4818]: I0930 17:25:55.505279 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerStarted","Data":"99e80bfe95ea6402c7aee07a619e64b57d000ad8da9e9320b37a10a36e5181af"} Sep 30 17:25:55 crc kubenswrapper[4818]: I0930 17:25:55.505476 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:25:55 crc kubenswrapper[4818]: I0930 17:25:55.527036 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.794519883 podStartE2EDuration="5.527020747s" podCreationTimestamp="2025-09-30 17:25:50 +0000 UTC" firstStartedPulling="2025-09-30 17:25:51.332858433 +0000 UTC m=+1598.087130249" lastFinishedPulling="2025-09-30 17:25:55.065359297 +0000 UTC m=+1601.819631113" observedRunningTime="2025-09-30 17:25:55.525909787 +0000 UTC m=+1602.280181623" watchObservedRunningTime="2025-09-30 17:25:55.527020747 +0000 UTC m=+1602.281292563" Sep 30 17:25:56 crc kubenswrapper[4818]: I0930 17:25:56.025624 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:25:56 crc kubenswrapper[4818]: E0930 17:25:56.025842 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:25:56 crc kubenswrapper[4818]: I0930 17:25:56.625760 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:57 crc kubenswrapper[4818]: I0930 17:25:57.830748 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_d08329e8-65f4-466c-aa9a-e1f488b8446e/watcher-decision-engine/0.log" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.168508 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.258030 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-config-data\") pod \"d08329e8-65f4-466c-aa9a-e1f488b8446e\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.258098 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-combined-ca-bundle\") pod \"d08329e8-65f4-466c-aa9a-e1f488b8446e\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.258242 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d08329e8-65f4-466c-aa9a-e1f488b8446e-logs\") pod \"d08329e8-65f4-466c-aa9a-e1f488b8446e\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.258268 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-custom-prometheus-ca\") pod \"d08329e8-65f4-466c-aa9a-e1f488b8446e\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.258588 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d08329e8-65f4-466c-aa9a-e1f488b8446e-logs" (OuterVolumeSpecName: "logs") pod "d08329e8-65f4-466c-aa9a-e1f488b8446e" (UID: "d08329e8-65f4-466c-aa9a-e1f488b8446e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.258718 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-cert-memcached-mtls\") pod \"d08329e8-65f4-466c-aa9a-e1f488b8446e\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.259144 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7488\" (UniqueName: \"kubernetes.io/projected/d08329e8-65f4-466c-aa9a-e1f488b8446e-kube-api-access-c7488\") pod \"d08329e8-65f4-466c-aa9a-e1f488b8446e\" (UID: \"d08329e8-65f4-466c-aa9a-e1f488b8446e\") " Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.259529 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d08329e8-65f4-466c-aa9a-e1f488b8446e-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.266782 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d08329e8-65f4-466c-aa9a-e1f488b8446e-kube-api-access-c7488" (OuterVolumeSpecName: "kube-api-access-c7488") pod "d08329e8-65f4-466c-aa9a-e1f488b8446e" (UID: "d08329e8-65f4-466c-aa9a-e1f488b8446e"). InnerVolumeSpecName "kube-api-access-c7488". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.284054 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "d08329e8-65f4-466c-aa9a-e1f488b8446e" (UID: "d08329e8-65f4-466c-aa9a-e1f488b8446e"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.284089 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d08329e8-65f4-466c-aa9a-e1f488b8446e" (UID: "d08329e8-65f4-466c-aa9a-e1f488b8446e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.304947 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-config-data" (OuterVolumeSpecName: "config-data") pod "d08329e8-65f4-466c-aa9a-e1f488b8446e" (UID: "d08329e8-65f4-466c-aa9a-e1f488b8446e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.322085 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "d08329e8-65f4-466c-aa9a-e1f488b8446e" (UID: "d08329e8-65f4-466c-aa9a-e1f488b8446e"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.360794 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.360825 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7488\" (UniqueName: \"kubernetes.io/projected/d08329e8-65f4-466c-aa9a-e1f488b8446e-kube-api-access-c7488\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.360835 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.360843 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.360854 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d08329e8-65f4-466c-aa9a-e1f488b8446e-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.538373 4818 generic.go:334] "Generic (PLEG): container finished" podID="d08329e8-65f4-466c-aa9a-e1f488b8446e" containerID="ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020" exitCode=0 Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.538666 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"d08329e8-65f4-466c-aa9a-e1f488b8446e","Type":"ContainerDied","Data":"ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020"} Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.538785 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"d08329e8-65f4-466c-aa9a-e1f488b8446e","Type":"ContainerDied","Data":"d550e6db07b12f6af01906bcde3cc7ad8cd2b747983a3fd92fa25b812c29bea8"} Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.538514 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.538840 4818 scope.go:117] "RemoveContainer" containerID="ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.608577 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.619202 4818 scope.go:117] "RemoveContainer" containerID="ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.619646 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.624082 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020\": container with ID starting with ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020 not found: ID does not exist" containerID="ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.624145 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020"} err="failed to get container status \"ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020\": rpc error: code = NotFound desc = could not find container \"ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020\": container with ID starting with ce3ad6efab22e9579cb3fc069743bb90b200a9fb6e0a2ce032c8cc9753ddd020 not found: ID does not exist" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635250 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.635720 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="cinder-backup" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635738 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="cinder-backup" Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.635758 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="probe" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635767 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="probe" Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.635780 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="cinder-scheduler" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635789 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="cinder-scheduler" Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.635806 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08329e8-65f4-466c-aa9a-e1f488b8446e" containerName="watcher-decision-engine" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635814 4818 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d08329e8-65f4-466c-aa9a-e1f488b8446e" containerName="watcher-decision-engine" Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.635831 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api-log" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635839 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api-log" Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.635868 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="probe" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635876 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="probe" Sep 30 17:25:58 crc kubenswrapper[4818]: E0930 17:25:58.635893 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.635902 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.636178 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="probe" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.636196 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d08329e8-65f4-466c-aa9a-e1f488b8446e" containerName="watcher-decision-engine" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.636206 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.636223 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="d51d078b-04c1-4ea7-9411-68e8e8f9f160" containerName="cinder-backup" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.636238 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="probe" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.636251 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="34dccb04-d413-4e39-b2c9-87bcec31e790" containerName="cinder-scheduler" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.636262 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3351d4d0-c886-4477-bc07-427cc064b4f7" containerName="cinder-api-log" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.637024 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.644153 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.649241 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.668322 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.668407 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.668442 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl5hb\" (UniqueName: \"kubernetes.io/projected/ece8baa6-5717-48d9-acca-3b01adcd52c7-kube-api-access-wl5hb\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.668567 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ece8baa6-5717-48d9-acca-3b01adcd52c7-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.668652 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.668677 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.770254 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.770317 4818 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.770336 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl5hb\" (UniqueName: \"kubernetes.io/projected/ece8baa6-5717-48d9-acca-3b01adcd52c7-kube-api-access-wl5hb\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.770357 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ece8baa6-5717-48d9-acca-3b01adcd52c7-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.770380 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.770394 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.771753 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ece8baa6-5717-48d9-acca-3b01adcd52c7-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.774896 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.785183 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.787975 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl5hb\" (UniqueName: \"kubernetes.io/projected/ece8baa6-5717-48d9-acca-3b01adcd52c7-kube-api-access-wl5hb\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc 
kubenswrapper[4818]: I0930 17:25:58.794696 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.795484 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:58 crc kubenswrapper[4818]: I0930 17:25:58.958346 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:25:59 crc kubenswrapper[4818]: I0930 17:25:59.431420 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:25:59 crc kubenswrapper[4818]: I0930 17:25:59.549815 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ece8baa6-5717-48d9-acca-3b01adcd52c7","Type":"ContainerStarted","Data":"a771ebc021833ebd6734503ceff26824a7123646b540deeb2fa067dc95193d65"} Sep 30 17:26:00 crc kubenswrapper[4818]: I0930 17:26:00.033352 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d08329e8-65f4-466c-aa9a-e1f488b8446e" path="/var/lib/kubelet/pods/d08329e8-65f4-466c-aa9a-e1f488b8446e/volumes" Sep 30 17:26:00 crc kubenswrapper[4818]: I0930 17:26:00.563438 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ece8baa6-5717-48d9-acca-3b01adcd52c7","Type":"ContainerStarted","Data":"89fb1461ee943e21ce0cae0da63e60206b0104c82c883623f9bc4d06ae4d41d5"} Sep 30 17:26:01 crc kubenswrapper[4818]: I0930 17:26:01.336728 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:02 crc kubenswrapper[4818]: I0930 17:26:02.580624 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:03 crc kubenswrapper[4818]: I0930 17:26:03.791797 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:05 crc kubenswrapper[4818]: I0930 17:26:05.019632 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:06 crc kubenswrapper[4818]: I0930 17:26:06.196542 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:07 crc kubenswrapper[4818]: I0930 17:26:07.388722 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:08 crc kubenswrapper[4818]: I0930 17:26:08.635204 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:08 crc kubenswrapper[4818]: I0930 17:26:08.959591 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:09 crc kubenswrapper[4818]: I0930 17:26:09.004459 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:09 crc kubenswrapper[4818]: I0930 17:26:09.021563 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:26:09 crc kubenswrapper[4818]: E0930 17:26:09.021855 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:26:09 crc kubenswrapper[4818]: I0930 17:26:09.035421 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=11.035390763 podStartE2EDuration="11.035390763s" podCreationTimestamp="2025-09-30 17:25:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:26:00.588766141 +0000 UTC m=+1607.343037967" watchObservedRunningTime="2025-09-30 17:26:09.035390763 +0000 UTC m=+1615.789662619" Sep 30 17:26:09 crc kubenswrapper[4818]: I0930 17:26:09.649625 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:09 crc kubenswrapper[4818]: I0930 17:26:09.674563 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:09 crc kubenswrapper[4818]: I0930 17:26:09.831416 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.046349 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_ece8baa6-5717-48d9-acca-3b01adcd52c7/watcher-decision-engine/0.log" Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.211635 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"] Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.216715 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cbbxs"] Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.236121 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher8330-account-delete-r2hzd"] Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.237358 4818 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.250702 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher8330-account-delete-r2hzd"] Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.282442 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.345063 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.345319 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="a7178784-85af-469a-8b70-b6949f6580a4" containerName="watcher-applier" containerID="cri-o://730e89915686204547bd5406635b878a0c4d01f7e60c039d4ccf0dd7ba653fb7" gracePeriod=30 Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.368127 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.368335 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-kuttl-api-log" containerID="cri-o://11339a2da1384ee752135d142d8c7051e0cbebc5fa9d417cf308e901d45a9265" gracePeriod=30 Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.368676 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-api" containerID="cri-o://f957015d64e75cb3a2ebcdc628dbad48ff6a048cffa6abecd35687d0a0d19a96" gracePeriod=30 Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.395818 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9h9k\" (UniqueName: \"kubernetes.io/projected/1810722e-7a70-47c6-9fe4-2defb415ced8-kube-api-access-p9h9k\") pod \"watcher8330-account-delete-r2hzd\" (UID: \"1810722e-7a70-47c6-9fe4-2defb415ced8\") " pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.497072 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9h9k\" (UniqueName: \"kubernetes.io/projected/1810722e-7a70-47c6-9fe4-2defb415ced8-kube-api-access-p9h9k\") pod \"watcher8330-account-delete-r2hzd\" (UID: \"1810722e-7a70-47c6-9fe4-2defb415ced8\") " pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.517198 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9h9k\" (UniqueName: \"kubernetes.io/projected/1810722e-7a70-47c6-9fe4-2defb415ced8-kube-api-access-p9h9k\") pod \"watcher8330-account-delete-r2hzd\" (UID: \"1810722e-7a70-47c6-9fe4-2defb415ced8\") " pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.560268 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.671050 4818 generic.go:334] "Generic (PLEG): container finished" podID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerID="11339a2da1384ee752135d142d8c7051e0cbebc5fa9d417cf308e901d45a9265" exitCode=143 Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.671152 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"f5634d48-92cc-4cb1-9009-5bcd5ad54179","Type":"ContainerDied","Data":"11339a2da1384ee752135d142d8c7051e0cbebc5fa9d417cf308e901d45a9265"} Sep 30 17:26:11 crc kubenswrapper[4818]: I0930 17:26:11.671590 4818 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" secret="" err="secret \"watcher-watcher-kuttl-dockercfg-9sl6v\" not found" Sep 30 17:26:11 crc kubenswrapper[4818]: E0930 17:26:11.702193 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:11 crc kubenswrapper[4818]: E0930 17:26:11.702463 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data podName:ece8baa6-5717-48d9-acca-3b01adcd52c7 nodeName:}" failed. No retries permitted until 2025-09-30 17:26:12.202445635 +0000 UTC m=+1618.956717451 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7") : secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.031704 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="918ea858-4ad5-46f8-a917-608a55a2a80b" path="/var/lib/kubelet/pods/918ea858-4ad5-46f8-a917-608a55a2a80b/volumes" Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.094093 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher8330-account-delete-r2hzd"] Sep 30 17:26:12 crc kubenswrapper[4818]: E0930 17:26:12.215146 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:12 crc kubenswrapper[4818]: E0930 17:26:12.215209 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data podName:ece8baa6-5717-48d9-acca-3b01adcd52c7 nodeName:}" failed. No retries permitted until 2025-09-30 17:26:13.215191806 +0000 UTC m=+1619.969463622 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7") : secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:12 crc kubenswrapper[4818]: E0930 17:26:12.611276 4818 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7178784_85af_469a_8b70_b6949f6580a4.slice/crio-conmon-730e89915686204547bd5406635b878a0c4d01f7e60c039d4ccf0dd7ba653fb7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7178784_85af_469a_8b70_b6949f6580a4.slice/crio-730e89915686204547bd5406635b878a0c4d01f7e60c039d4ccf0dd7ba653fb7.scope\": RecentStats: unable to find data in memory cache]" Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.690412 4818 generic.go:334] "Generic (PLEG): container finished" podID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerID="f957015d64e75cb3a2ebcdc628dbad48ff6a048cffa6abecd35687d0a0d19a96" exitCode=0 Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.690583 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"f5634d48-92cc-4cb1-9009-5bcd5ad54179","Type":"ContainerDied","Data":"f957015d64e75cb3a2ebcdc628dbad48ff6a048cffa6abecd35687d0a0d19a96"} Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.692596 4818 generic.go:334] "Generic (PLEG): container finished" podID="a7178784-85af-469a-8b70-b6949f6580a4" containerID="730e89915686204547bd5406635b878a0c4d01f7e60c039d4ccf0dd7ba653fb7" exitCode=0 Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.692685 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a7178784-85af-469a-8b70-b6949f6580a4","Type":"ContainerDied","Data":"730e89915686204547bd5406635b878a0c4d01f7e60c039d4ccf0dd7ba653fb7"} Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.697262 4818 generic.go:334] "Generic (PLEG): container finished" podID="1810722e-7a70-47c6-9fe4-2defb415ced8" containerID="748ed8ecf4f69407685aa8d303a631a86821253048aa777f156ab78fb16920e1" exitCode=0 Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.697473 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="ece8baa6-5717-48d9-acca-3b01adcd52c7" containerName="watcher-decision-engine" containerID="cri-o://89fb1461ee943e21ce0cae0da63e60206b0104c82c883623f9bc4d06ae4d41d5" gracePeriod=30 Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.697842 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" event={"ID":"1810722e-7a70-47c6-9fe4-2defb415ced8","Type":"ContainerDied","Data":"748ed8ecf4f69407685aa8d303a631a86821253048aa777f156ab78fb16920e1"} Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.697870 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" event={"ID":"1810722e-7a70-47c6-9fe4-2defb415ced8","Type":"ContainerStarted","Data":"f0bc7618bdeb90d35781a3b707678e58bfc5c3eefaceac957963e5d4f74f1783"} Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.776127 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.906054 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.930215 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-config-data\") pod \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.930287 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5634d48-92cc-4cb1-9009-5bcd5ad54179-logs\") pod \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.930333 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-custom-prometheus-ca\") pod \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.930453 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-cert-memcached-mtls\") pod \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.930478 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-combined-ca-bundle\") pod \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.930548 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptvsn\" (UniqueName: \"kubernetes.io/projected/f5634d48-92cc-4cb1-9009-5bcd5ad54179-kube-api-access-ptvsn\") pod \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\" (UID: \"f5634d48-92cc-4cb1-9009-5bcd5ad54179\") " Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.931361 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5634d48-92cc-4cb1-9009-5bcd5ad54179-logs" (OuterVolumeSpecName: "logs") pod "f5634d48-92cc-4cb1-9009-5bcd5ad54179" (UID: "f5634d48-92cc-4cb1-9009-5bcd5ad54179"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.971646 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5634d48-92cc-4cb1-9009-5bcd5ad54179-kube-api-access-ptvsn" (OuterVolumeSpecName: "kube-api-access-ptvsn") pod "f5634d48-92cc-4cb1-9009-5bcd5ad54179" (UID: "f5634d48-92cc-4cb1-9009-5bcd5ad54179"). InnerVolumeSpecName "kube-api-access-ptvsn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.972213 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "f5634d48-92cc-4cb1-9009-5bcd5ad54179" (UID: "f5634d48-92cc-4cb1-9009-5bcd5ad54179"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:12 crc kubenswrapper[4818]: I0930 17:26:12.979974 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5634d48-92cc-4cb1-9009-5bcd5ad54179" (UID: "f5634d48-92cc-4cb1-9009-5bcd5ad54179"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.000693 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-config-data" (OuterVolumeSpecName: "config-data") pod "f5634d48-92cc-4cb1-9009-5bcd5ad54179" (UID: "f5634d48-92cc-4cb1-9009-5bcd5ad54179"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.014377 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "f5634d48-92cc-4cb1-9009-5bcd5ad54179" (UID: "f5634d48-92cc-4cb1-9009-5bcd5ad54179"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032263 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-combined-ca-bundle\") pod \"a7178784-85af-469a-8b70-b6949f6580a4\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032312 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-config-data\") pod \"a7178784-85af-469a-8b70-b6949f6580a4\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032342 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k45g8\" (UniqueName: \"kubernetes.io/projected/a7178784-85af-469a-8b70-b6949f6580a4-kube-api-access-k45g8\") pod \"a7178784-85af-469a-8b70-b6949f6580a4\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032374 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7178784-85af-469a-8b70-b6949f6580a4-logs\") pod \"a7178784-85af-469a-8b70-b6949f6580a4\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032447 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-cert-memcached-mtls\") pod \"a7178784-85af-469a-8b70-b6949f6580a4\" (UID: \"a7178784-85af-469a-8b70-b6949f6580a4\") " Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032755 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032776 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032787 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptvsn\" (UniqueName: \"kubernetes.io/projected/f5634d48-92cc-4cb1-9009-5bcd5ad54179-kube-api-access-ptvsn\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032798 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032806 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5634d48-92cc-4cb1-9009-5bcd5ad54179-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.032815 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f5634d48-92cc-4cb1-9009-5bcd5ad54179-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.036735 4818 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/projected/a7178784-85af-469a-8b70-b6949f6580a4-kube-api-access-k45g8" (OuterVolumeSpecName: "kube-api-access-k45g8") pod "a7178784-85af-469a-8b70-b6949f6580a4" (UID: "a7178784-85af-469a-8b70-b6949f6580a4"). InnerVolumeSpecName "kube-api-access-k45g8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.040089 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7178784-85af-469a-8b70-b6949f6580a4-logs" (OuterVolumeSpecName: "logs") pod "a7178784-85af-469a-8b70-b6949f6580a4" (UID: "a7178784-85af-469a-8b70-b6949f6580a4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.066979 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7178784-85af-469a-8b70-b6949f6580a4" (UID: "a7178784-85af-469a-8b70-b6949f6580a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.073381 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-config-data" (OuterVolumeSpecName: "config-data") pod "a7178784-85af-469a-8b70-b6949f6580a4" (UID: "a7178784-85af-469a-8b70-b6949f6580a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.099510 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "a7178784-85af-469a-8b70-b6949f6580a4" (UID: "a7178784-85af-469a-8b70-b6949f6580a4"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.134537 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.134574 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.134585 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7178784-85af-469a-8b70-b6949f6580a4-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.134594 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k45g8\" (UniqueName: \"kubernetes.io/projected/a7178784-85af-469a-8b70-b6949f6580a4-kube-api-access-k45g8\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.134605 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7178784-85af-469a-8b70-b6949f6580a4-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:13 crc kubenswrapper[4818]: E0930 17:26:13.236425 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:13 crc kubenswrapper[4818]: E0930 17:26:13.236485 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data podName:ece8baa6-5717-48d9-acca-3b01adcd52c7 nodeName:}" failed. No retries permitted until 2025-09-30 17:26:15.236472664 +0000 UTC m=+1621.990744480 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7") : secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.705466 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"f5634d48-92cc-4cb1-9009-5bcd5ad54179","Type":"ContainerDied","Data":"d477723713b55fd5d7f083e6fa02aa464e961ef4cc01c263eaa32e2ab58da833"} Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.705509 4818 scope.go:117] "RemoveContainer" containerID="f957015d64e75cb3a2ebcdc628dbad48ff6a048cffa6abecd35687d0a0d19a96" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.705606 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.716628 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.716634 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a7178784-85af-469a-8b70-b6949f6580a4","Type":"ContainerDied","Data":"aa406a9918aacd54cd46e13bccf7185253f09a1164f4ab77b87ebefd66404385"} Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.753341 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.757744 4818 scope.go:117] "RemoveContainer" containerID="11339a2da1384ee752135d142d8c7051e0cbebc5fa9d417cf308e901d45a9265" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.762771 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.771643 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.777030 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.792117 4818 scope.go:117] "RemoveContainer" containerID="730e89915686204547bd5406635b878a0c4d01f7e60c039d4ccf0dd7ba653fb7" Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.930549 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.930852 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-central-agent" containerID="cri-o://24e4b54bccbb43e81db28f431ce045404d6719d6930136e59a692023089df235" gracePeriod=30 Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.932102 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="proxy-httpd" containerID="cri-o://99e80bfe95ea6402c7aee07a619e64b57d000ad8da9e9320b37a10a36e5181af" gracePeriod=30 Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.932180 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="sg-core" containerID="cri-o://3fb0b9527c05edad2969524ccab413ebfcfaaa133c35f12e0586251d7457837d" gracePeriod=30 Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.932225 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-notification-agent" containerID="cri-o://0d26a9458d54df82fbe8e5699d1c968785a312fb9d7034dc562ddabecb1f2371" gracePeriod=30 Sep 30 17:26:13 crc kubenswrapper[4818]: I0930 17:26:13.944139 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.039014 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7178784-85af-469a-8b70-b6949f6580a4" path="/var/lib/kubelet/pods/a7178784-85af-469a-8b70-b6949f6580a4/volumes" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.039599 4818 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" path="/var/lib/kubelet/pods/f5634d48-92cc-4cb1-9009-5bcd5ad54179/volumes" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.095762 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.250790 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9h9k\" (UniqueName: \"kubernetes.io/projected/1810722e-7a70-47c6-9fe4-2defb415ced8-kube-api-access-p9h9k\") pod \"1810722e-7a70-47c6-9fe4-2defb415ced8\" (UID: \"1810722e-7a70-47c6-9fe4-2defb415ced8\") " Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.257010 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1810722e-7a70-47c6-9fe4-2defb415ced8-kube-api-access-p9h9k" (OuterVolumeSpecName: "kube-api-access-p9h9k") pod "1810722e-7a70-47c6-9fe4-2defb415ced8" (UID: "1810722e-7a70-47c6-9fe4-2defb415ced8"). InnerVolumeSpecName "kube-api-access-p9h9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.352862 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9h9k\" (UniqueName: \"kubernetes.io/projected/1810722e-7a70-47c6-9fe4-2defb415ced8-kube-api-access-p9h9k\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.726302 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" event={"ID":"1810722e-7a70-47c6-9fe4-2defb415ced8","Type":"ContainerDied","Data":"f0bc7618bdeb90d35781a3b707678e58bfc5c3eefaceac957963e5d4f74f1783"} Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.726537 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0bc7618bdeb90d35781a3b707678e58bfc5c3eefaceac957963e5d4f74f1783" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.727048 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher8330-account-delete-r2hzd" Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.734135 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerID="99e80bfe95ea6402c7aee07a619e64b57d000ad8da9e9320b37a10a36e5181af" exitCode=0 Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.734157 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerID="3fb0b9527c05edad2969524ccab413ebfcfaaa133c35f12e0586251d7457837d" exitCode=2 Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.734165 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerID="24e4b54bccbb43e81db28f431ce045404d6719d6930136e59a692023089df235" exitCode=0 Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.734212 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerDied","Data":"99e80bfe95ea6402c7aee07a619e64b57d000ad8da9e9320b37a10a36e5181af"} Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.734272 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerDied","Data":"3fb0b9527c05edad2969524ccab413ebfcfaaa133c35f12e0586251d7457837d"} Sep 30 17:26:14 crc kubenswrapper[4818]: I0930 17:26:14.734288 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerDied","Data":"24e4b54bccbb43e81db28f431ce045404d6719d6930136e59a692023089df235"} Sep 30 17:26:15 crc kubenswrapper[4818]: E0930 17:26:15.267973 4818 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-decision-engine-config-data: secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:15 crc kubenswrapper[4818]: E0930 17:26:15.268052 4818 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data podName:ece8baa6-5717-48d9-acca-3b01adcd52c7 nodeName:}" failed. No retries permitted until 2025-09-30 17:26:19.268037233 +0000 UTC m=+1626.022309049 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data") pod "watcher-kuttl-decision-engine-0" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7") : secret "watcher-kuttl-decision-engine-config-data" not found Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.255978 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-tzwn7"] Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.265421 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-tzwn7"] Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.271106 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher8330-account-delete-r2hzd"] Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.276137 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-8330-account-create-lc5wj"] Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.281216 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher8330-account-delete-r2hzd"] Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.286144 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-8330-account-create-lc5wj"] Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.766443 4818 generic.go:334] "Generic (PLEG): container finished" podID="ece8baa6-5717-48d9-acca-3b01adcd52c7" containerID="89fb1461ee943e21ce0cae0da63e60206b0104c82c883623f9bc4d06ae4d41d5" exitCode=0 Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.766569 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ece8baa6-5717-48d9-acca-3b01adcd52c7","Type":"ContainerDied","Data":"89fb1461ee943e21ce0cae0da63e60206b0104c82c883623f9bc4d06ae4d41d5"} Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.766750 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ece8baa6-5717-48d9-acca-3b01adcd52c7","Type":"ContainerDied","Data":"a771ebc021833ebd6734503ceff26824a7123646b540deeb2fa067dc95193d65"} Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.766770 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a771ebc021833ebd6734503ceff26824a7123646b540deeb2fa067dc95193d65" Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.828430 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.895358 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-custom-prometheus-ca\") pod \"ece8baa6-5717-48d9-acca-3b01adcd52c7\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") "
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.895489 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-cert-memcached-mtls\") pod \"ece8baa6-5717-48d9-acca-3b01adcd52c7\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") "
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.895606 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ece8baa6-5717-48d9-acca-3b01adcd52c7-logs\") pod \"ece8baa6-5717-48d9-acca-3b01adcd52c7\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") "
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.895705 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-combined-ca-bundle\") pod \"ece8baa6-5717-48d9-acca-3b01adcd52c7\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") "
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.895759 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl5hb\" (UniqueName: \"kubernetes.io/projected/ece8baa6-5717-48d9-acca-3b01adcd52c7-kube-api-access-wl5hb\") pod \"ece8baa6-5717-48d9-acca-3b01adcd52c7\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") "
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.895800 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data\") pod \"ece8baa6-5717-48d9-acca-3b01adcd52c7\" (UID: \"ece8baa6-5717-48d9-acca-3b01adcd52c7\") "
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.896460 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ece8baa6-5717-48d9-acca-3b01adcd52c7-logs" (OuterVolumeSpecName: "logs") pod "ece8baa6-5717-48d9-acca-3b01adcd52c7" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.908311 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ece8baa6-5717-48d9-acca-3b01adcd52c7-kube-api-access-wl5hb" (OuterVolumeSpecName: "kube-api-access-wl5hb") pod "ece8baa6-5717-48d9-acca-3b01adcd52c7" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7"). InnerVolumeSpecName "kube-api-access-wl5hb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.929083 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "ece8baa6-5717-48d9-acca-3b01adcd52c7" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.938078 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ece8baa6-5717-48d9-acca-3b01adcd52c7" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.946369 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data" (OuterVolumeSpecName: "config-data") pod "ece8baa6-5717-48d9-acca-3b01adcd52c7" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.972952 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "ece8baa6-5717-48d9-acca-3b01adcd52c7" (UID: "ece8baa6-5717-48d9-acca-3b01adcd52c7"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.998092 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.998129 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl5hb\" (UniqueName: \"kubernetes.io/projected/ece8baa6-5717-48d9-acca-3b01adcd52c7-kube-api-access-wl5hb\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.998145 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.998161 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.998173 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ece8baa6-5717-48d9-acca-3b01adcd52c7-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:16 crc kubenswrapper[4818]: I0930 17:26:16.998185 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ece8baa6-5717-48d9-acca-3b01adcd52c7-logs\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:17 crc kubenswrapper[4818]: I0930 17:26:17.779063 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Sep 30 17:26:17 crc kubenswrapper[4818]: I0930 17:26:17.826168 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:26:17 crc kubenswrapper[4818]: I0930 17:26:17.833457 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.032313 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1810722e-7a70-47c6-9fe4-2defb415ced8" path="/var/lib/kubelet/pods/1810722e-7a70-47c6-9fe4-2defb415ced8/volumes"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.032957 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34f15efb-9be5-4a1c-9ab1-49ea8d5f828f" path="/var/lib/kubelet/pods/34f15efb-9be5-4a1c-9ab1-49ea8d5f828f/volumes"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.033508 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e57330cc-de35-4537-bc9e-db3ff0e443b0" path="/var/lib/kubelet/pods/e57330cc-de35-4537-bc9e-db3ff0e443b0/volumes"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.034165 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ece8baa6-5717-48d9-acca-3b01adcd52c7" path="/var/lib/kubelet/pods/ece8baa6-5717-48d9-acca-3b01adcd52c7/volumes"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.280540 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-6lqgb"]
Sep 30 17:26:18 crc kubenswrapper[4818]: E0930 17:26:18.280885 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1810722e-7a70-47c6-9fe4-2defb415ced8" containerName="mariadb-account-delete"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.280903 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1810722e-7a70-47c6-9fe4-2defb415ced8" containerName="mariadb-account-delete"
Sep 30 17:26:18 crc kubenswrapper[4818]: E0930 17:26:18.280930 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-api"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.280937 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-api"
Sep 30 17:26:18 crc kubenswrapper[4818]: E0930 17:26:18.280953 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7178784-85af-469a-8b70-b6949f6580a4" containerName="watcher-applier"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.280958 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7178784-85af-469a-8b70-b6949f6580a4" containerName="watcher-applier"
Sep 30 17:26:18 crc kubenswrapper[4818]: E0930 17:26:18.280969 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ece8baa6-5717-48d9-acca-3b01adcd52c7" containerName="watcher-decision-engine"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.280974 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ece8baa6-5717-48d9-acca-3b01adcd52c7" containerName="watcher-decision-engine"
Sep 30 17:26:18 crc kubenswrapper[4818]: E0930 17:26:18.281053 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-kuttl-api-log"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.281060 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-kuttl-api-log"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.281205 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7178784-85af-469a-8b70-b6949f6580a4" containerName="watcher-applier"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.281224 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ece8baa6-5717-48d9-acca-3b01adcd52c7" containerName="watcher-decision-engine"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.281235 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-api"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.281243 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5634d48-92cc-4cb1-9009-5bcd5ad54179" containerName="watcher-kuttl-api-log"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.281260 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1810722e-7a70-47c6-9fe4-2defb415ced8" containerName="mariadb-account-delete"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.281804 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-6lqgb"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.331166 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-6lqgb"]
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.423046 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncg26\" (UniqueName: \"kubernetes.io/projected/408f4ce8-e091-4a55-8597-40cc51e3082e-kube-api-access-ncg26\") pod \"watcher-db-create-6lqgb\" (UID: \"408f4ce8-e091-4a55-8597-40cc51e3082e\") " pod="watcher-kuttl-default/watcher-db-create-6lqgb"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.524677 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncg26\" (UniqueName: \"kubernetes.io/projected/408f4ce8-e091-4a55-8597-40cc51e3082e-kube-api-access-ncg26\") pod \"watcher-db-create-6lqgb\" (UID: \"408f4ce8-e091-4a55-8597-40cc51e3082e\") " pod="watcher-kuttl-default/watcher-db-create-6lqgb"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.552095 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncg26\" (UniqueName: \"kubernetes.io/projected/408f4ce8-e091-4a55-8597-40cc51e3082e-kube-api-access-ncg26\") pod \"watcher-db-create-6lqgb\" (UID: \"408f4ce8-e091-4a55-8597-40cc51e3082e\") " pod="watcher-kuttl-default/watcher-db-create-6lqgb"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.596962 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-6lqgb"
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.804671 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerID="0d26a9458d54df82fbe8e5699d1c968785a312fb9d7034dc562ddabecb1f2371" exitCode=0
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.804953 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerDied","Data":"0d26a9458d54df82fbe8e5699d1c968785a312fb9d7034dc562ddabecb1f2371"}
Sep 30 17:26:18 crc kubenswrapper[4818]: I0930 17:26:18.975946 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.035741 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-sg-core-conf-yaml\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.035786 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-log-httpd\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.035814 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-ceilometer-tls-certs\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.035893 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-run-httpd\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.035930 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-config-data\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.035967 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-combined-ca-bundle\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.036028 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lh85\" (UniqueName: \"kubernetes.io/projected/6e51b830-4d45-4b0f-8773-40e46ff074ee-kube-api-access-7lh85\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.036044 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-scripts\") pod \"6e51b830-4d45-4b0f-8773-40e46ff074ee\" (UID: \"6e51b830-4d45-4b0f-8773-40e46ff074ee\") "
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.036279 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.037006 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.043050 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-scripts" (OuterVolumeSpecName: "scripts") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.045163 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e51b830-4d45-4b0f-8773-40e46ff074ee-kube-api-access-7lh85" (OuterVolumeSpecName: "kube-api-access-7lh85") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "kube-api-access-7lh85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.069187 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.091135 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.111333 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.135180 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-config-data" (OuterVolumeSpecName: "config-data") pod "6e51b830-4d45-4b0f-8773-40e46ff074ee" (UID: "6e51b830-4d45-4b0f-8773-40e46ff074ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138043 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138073 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138083 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138092 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e51b830-4d45-4b0f-8773-40e46ff074ee-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138100 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138108 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138119 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lh85\" (UniqueName: \"kubernetes.io/projected/6e51b830-4d45-4b0f-8773-40e46ff074ee-kube-api-access-7lh85\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.138129 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e51b830-4d45-4b0f-8773-40e46ff074ee-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.148632 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-6lqgb"]
Sep 30 17:26:19 crc kubenswrapper[4818]: W0930 17:26:19.158149 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod408f4ce8_e091_4a55_8597_40cc51e3082e.slice/crio-b2ded4933ad50e88ae98a90b20343577deadaa4ebe1762a4e42ab287a8c52645 WatchSource:0}: Error finding container b2ded4933ad50e88ae98a90b20343577deadaa4ebe1762a4e42ab287a8c52645: Status 404 returned error can't find the container with id b2ded4933ad50e88ae98a90b20343577deadaa4ebe1762a4e42ab287a8c52645
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.814098 4818 generic.go:334] "Generic (PLEG): container finished" podID="408f4ce8-e091-4a55-8597-40cc51e3082e" containerID="ace498affd6e119be4a76f4999f60a2b1e10e048bd4cb979ac71ae983d510a7e" exitCode=0
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.814179 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-6lqgb" event={"ID":"408f4ce8-e091-4a55-8597-40cc51e3082e","Type":"ContainerDied","Data":"ace498affd6e119be4a76f4999f60a2b1e10e048bd4cb979ac71ae983d510a7e"}
Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.814211 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-6lqgb" event={"ID":"408f4ce8-e091-4a55-8597-40cc51e3082e","Type":"ContainerStarted","Data":"b2ded4933ad50e88ae98a90b20343577deadaa4ebe1762a4e42ab287a8c52645"}
"SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-6lqgb" event={"ID":"408f4ce8-e091-4a55-8597-40cc51e3082e","Type":"ContainerStarted","Data":"b2ded4933ad50e88ae98a90b20343577deadaa4ebe1762a4e42ab287a8c52645"} Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.816658 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6e51b830-4d45-4b0f-8773-40e46ff074ee","Type":"ContainerDied","Data":"fc1f5d1428ca5b569c569cce9ff8f7903e2e6dd3500b68739c01f4cb779479d9"} Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.816694 4818 scope.go:117] "RemoveContainer" containerID="99e80bfe95ea6402c7aee07a619e64b57d000ad8da9e9320b37a10a36e5181af" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.816730 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.837183 4818 scope.go:117] "RemoveContainer" containerID="3fb0b9527c05edad2969524ccab413ebfcfaaa133c35f12e0586251d7457837d" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.848903 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.856112 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.858949 4818 scope.go:117] "RemoveContainer" containerID="0d26a9458d54df82fbe8e5699d1c968785a312fb9d7034dc562ddabecb1f2371" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.869515 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:19 crc kubenswrapper[4818]: E0930 17:26:19.869802 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="sg-core" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.869819 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="sg-core" Sep 30 17:26:19 crc kubenswrapper[4818]: E0930 17:26:19.869833 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-central-agent" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.869840 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-central-agent" Sep 30 17:26:19 crc kubenswrapper[4818]: E0930 17:26:19.869861 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="proxy-httpd" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.869867 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="proxy-httpd" Sep 30 17:26:19 crc kubenswrapper[4818]: E0930 17:26:19.869886 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-notification-agent" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.869892 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-notification-agent" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.870074 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="sg-core" Sep 
30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.870085 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-notification-agent" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.870097 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="ceilometer-central-agent" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.870111 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" containerName="proxy-httpd" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.871833 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.877871 4818 scope.go:117] "RemoveContainer" containerID="24e4b54bccbb43e81db28f431ce045404d6719d6930136e59a692023089df235" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.878199 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.878961 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.879216 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.879811 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.949695 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-log-httpd\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.949753 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.949823 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-scripts\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.949877 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.949904 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-run-httpd\") pod \"ceilometer-0\" (UID: 
\"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.949941 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-config-data\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.949985 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qt8z\" (UniqueName: \"kubernetes.io/projected/10b497f0-b54f-490a-bb1b-3fba7dee4217-kube-api-access-4qt8z\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:19 crc kubenswrapper[4818]: I0930 17:26:19.950001 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.029292 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e51b830-4d45-4b0f-8773-40e46ff074ee" path="/var/lib/kubelet/pods/6e51b830-4d45-4b0f-8773-40e46ff074ee/volumes" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.051813 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-log-httpd\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.051872 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.051915 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-scripts\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.051958 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.051975 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-run-httpd\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.051990 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-config-data\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.052027 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qt8z\" (UniqueName: \"kubernetes.io/projected/10b497f0-b54f-490a-bb1b-3fba7dee4217-kube-api-access-4qt8z\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.052043 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.054117 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-run-httpd\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.054301 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-log-httpd\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.057219 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.058121 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.060514 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.062961 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-scripts\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.066842 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-config-data\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.072576 4818 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4qt8z\" (UniqueName: \"kubernetes.io/projected/10b497f0-b54f-490a-bb1b-3fba7dee4217-kube-api-access-4qt8z\") pod \"ceilometer-0\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.196016 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.655252 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:20 crc kubenswrapper[4818]: I0930 17:26:20.825495 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerStarted","Data":"d7977c4b356f494f51e6ada8643b0726a057603ae14d4034c95ac85a13520b1b"} Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.274884 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-6lqgb" Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.379815 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncg26\" (UniqueName: \"kubernetes.io/projected/408f4ce8-e091-4a55-8597-40cc51e3082e-kube-api-access-ncg26\") pod \"408f4ce8-e091-4a55-8597-40cc51e3082e\" (UID: \"408f4ce8-e091-4a55-8597-40cc51e3082e\") " Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.386152 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/408f4ce8-e091-4a55-8597-40cc51e3082e-kube-api-access-ncg26" (OuterVolumeSpecName: "kube-api-access-ncg26") pod "408f4ce8-e091-4a55-8597-40cc51e3082e" (UID: "408f4ce8-e091-4a55-8597-40cc51e3082e"). InnerVolumeSpecName "kube-api-access-ncg26". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.481617 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncg26\" (UniqueName: \"kubernetes.io/projected/408f4ce8-e091-4a55-8597-40cc51e3082e-kube-api-access-ncg26\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.848484 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerStarted","Data":"c391cbf046f64966d143acb6bb6926200c1f61fb7c54c064903e860ed59e4422"} Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.853037 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-6lqgb" event={"ID":"408f4ce8-e091-4a55-8597-40cc51e3082e","Type":"ContainerDied","Data":"b2ded4933ad50e88ae98a90b20343577deadaa4ebe1762a4e42ab287a8c52645"} Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.853096 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2ded4933ad50e88ae98a90b20343577deadaa4ebe1762a4e42ab287a8c52645" Sep 30 17:26:21 crc kubenswrapper[4818]: I0930 17:26:21.853163 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-6lqgb" Sep 30 17:26:22 crc kubenswrapper[4818]: I0930 17:26:22.062268 4818 scope.go:117] "RemoveContainer" containerID="80d6481dd9818317d403e845eff74a7b63727c657f00be22c0e231a5b6c0c43e" Sep 30 17:26:22 crc kubenswrapper[4818]: I0930 17:26:22.109110 4818 scope.go:117] "RemoveContainer" containerID="82def7b5c6178da1bab4750adb32623e59ef5a6fe90efbb416a24abed939eaea" Sep 30 17:26:22 crc kubenswrapper[4818]: I0930 17:26:22.148947 4818 scope.go:117] "RemoveContainer" containerID="0eaf6396e5a1e35819db86e64eb39716096c96070dece87bf3ee4f69103ab646" Sep 30 17:26:22 crc kubenswrapper[4818]: I0930 17:26:22.180365 4818 scope.go:117] "RemoveContainer" containerID="1b9863a9bbfd7aa35e4971c02ce5bc6dca01a8d6f74cfed8d6236a8b2c12ac1f" Sep 30 17:26:22 crc kubenswrapper[4818]: I0930 17:26:22.210018 4818 scope.go:117] "RemoveContainer" containerID="840a104e3b6b271ef2c90646821d246affcefe3f212282fabb4b9073d8040c0a" Sep 30 17:26:22 crc kubenswrapper[4818]: I0930 17:26:22.862858 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerStarted","Data":"f94b002387872a7228db1803dcd048b31d6818707b3b92f3b53200b4c442977d"} Sep 30 17:26:23 crc kubenswrapper[4818]: I0930 17:26:23.021825 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:26:23 crc kubenswrapper[4818]: E0930 17:26:23.022125 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:26:23 crc kubenswrapper[4818]: I0930 17:26:23.873747 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerStarted","Data":"8a2cc36da3eb19462dcb9374bac730512a177bac6de66565f8024786849fd15b"} Sep 30 17:26:25 crc kubenswrapper[4818]: I0930 17:26:25.890542 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerStarted","Data":"d787ea95c262090693fae9c417cc5000590e3b6310e3acc55658534893999ed2"} Sep 30 17:26:25 crc kubenswrapper[4818]: I0930 17:26:25.891135 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:25 crc kubenswrapper[4818]: I0930 17:26:25.922252 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.744713265 podStartE2EDuration="6.922226307s" podCreationTimestamp="2025-09-30 17:26:19 +0000 UTC" firstStartedPulling="2025-09-30 17:26:20.66850921 +0000 UTC m=+1627.422781026" lastFinishedPulling="2025-09-30 17:26:24.846022252 +0000 UTC m=+1631.600294068" observedRunningTime="2025-09-30 17:26:25.913000847 +0000 UTC m=+1632.667272683" watchObservedRunningTime="2025-09-30 17:26:25.922226307 +0000 UTC m=+1632.676498143" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.394569 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-8f08-account-create-fq6nv"] Sep 
30 17:26:28 crc kubenswrapper[4818]: E0930 17:26:28.395264 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="408f4ce8-e091-4a55-8597-40cc51e3082e" containerName="mariadb-database-create" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.395279 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="408f4ce8-e091-4a55-8597-40cc51e3082e" containerName="mariadb-database-create" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.395423 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="408f4ce8-e091-4a55-8597-40cc51e3082e" containerName="mariadb-database-create" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.396048 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.399220 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.410637 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-8f08-account-create-fq6nv"] Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.494646 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5f69\" (UniqueName: \"kubernetes.io/projected/3948095a-c90c-4ad2-85cf-2d269bd00bb8-kube-api-access-f5f69\") pod \"watcher-8f08-account-create-fq6nv\" (UID: \"3948095a-c90c-4ad2-85cf-2d269bd00bb8\") " pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.596165 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5f69\" (UniqueName: \"kubernetes.io/projected/3948095a-c90c-4ad2-85cf-2d269bd00bb8-kube-api-access-f5f69\") pod \"watcher-8f08-account-create-fq6nv\" (UID: \"3948095a-c90c-4ad2-85cf-2d269bd00bb8\") " pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.619833 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5f69\" (UniqueName: \"kubernetes.io/projected/3948095a-c90c-4ad2-85cf-2d269bd00bb8-kube-api-access-f5f69\") pod \"watcher-8f08-account-create-fq6nv\" (UID: \"3948095a-c90c-4ad2-85cf-2d269bd00bb8\") " pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" Sep 30 17:26:28 crc kubenswrapper[4818]: I0930 17:26:28.731145 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" Sep 30 17:26:29 crc kubenswrapper[4818]: I0930 17:26:29.247763 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-8f08-account-create-fq6nv"] Sep 30 17:26:29 crc kubenswrapper[4818]: W0930 17:26:29.257515 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3948095a_c90c_4ad2_85cf_2d269bd00bb8.slice/crio-7bd9b524a028a47a9391ab15eb1eef3353048022dc38371632e616b343df11ed WatchSource:0}: Error finding container 7bd9b524a028a47a9391ab15eb1eef3353048022dc38371632e616b343df11ed: Status 404 returned error can't find the container with id 7bd9b524a028a47a9391ab15eb1eef3353048022dc38371632e616b343df11ed Sep 30 17:26:29 crc kubenswrapper[4818]: I0930 17:26:29.937492 4818 generic.go:334] "Generic (PLEG): container finished" podID="3948095a-c90c-4ad2-85cf-2d269bd00bb8" containerID="f9ba60ee08f4f5c5b0c3e91328071b34af72409c0c8b6d65deb4688c4e2c43c2" exitCode=0 Sep 30 17:26:29 crc kubenswrapper[4818]: I0930 17:26:29.937607 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" event={"ID":"3948095a-c90c-4ad2-85cf-2d269bd00bb8","Type":"ContainerDied","Data":"f9ba60ee08f4f5c5b0c3e91328071b34af72409c0c8b6d65deb4688c4e2c43c2"} Sep 30 17:26:29 crc kubenswrapper[4818]: I0930 17:26:29.937761 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" event={"ID":"3948095a-c90c-4ad2-85cf-2d269bd00bb8","Type":"ContainerStarted","Data":"7bd9b524a028a47a9391ab15eb1eef3353048022dc38371632e616b343df11ed"} Sep 30 17:26:31 crc kubenswrapper[4818]: I0930 17:26:31.315786 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" Sep 30 17:26:31 crc kubenswrapper[4818]: I0930 17:26:31.450321 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5f69\" (UniqueName: \"kubernetes.io/projected/3948095a-c90c-4ad2-85cf-2d269bd00bb8-kube-api-access-f5f69\") pod \"3948095a-c90c-4ad2-85cf-2d269bd00bb8\" (UID: \"3948095a-c90c-4ad2-85cf-2d269bd00bb8\") " Sep 30 17:26:31 crc kubenswrapper[4818]: I0930 17:26:31.456548 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3948095a-c90c-4ad2-85cf-2d269bd00bb8-kube-api-access-f5f69" (OuterVolumeSpecName: "kube-api-access-f5f69") pod "3948095a-c90c-4ad2-85cf-2d269bd00bb8" (UID: "3948095a-c90c-4ad2-85cf-2d269bd00bb8"). InnerVolumeSpecName "kube-api-access-f5f69". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:26:31 crc kubenswrapper[4818]: I0930 17:26:31.552106 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5f69\" (UniqueName: \"kubernetes.io/projected/3948095a-c90c-4ad2-85cf-2d269bd00bb8-kube-api-access-f5f69\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:31 crc kubenswrapper[4818]: I0930 17:26:31.956839 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" event={"ID":"3948095a-c90c-4ad2-85cf-2d269bd00bb8","Type":"ContainerDied","Data":"7bd9b524a028a47a9391ab15eb1eef3353048022dc38371632e616b343df11ed"} Sep 30 17:26:31 crc kubenswrapper[4818]: I0930 17:26:31.956877 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7bd9b524a028a47a9391ab15eb1eef3353048022dc38371632e616b343df11ed" Sep 30 17:26:31 crc kubenswrapper[4818]: I0930 17:26:31.957002 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-8f08-account-create-fq6nv" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.718738 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k"] Sep 30 17:26:33 crc kubenswrapper[4818]: E0930 17:26:33.719525 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3948095a-c90c-4ad2-85cf-2d269bd00bb8" containerName="mariadb-account-create" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.719547 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3948095a-c90c-4ad2-85cf-2d269bd00bb8" containerName="mariadb-account-create" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.719823 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3948095a-c90c-4ad2-85cf-2d269bd00bb8" containerName="mariadb-account-create" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.720736 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.723055 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-vzqvm" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.723234 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.731244 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k"] Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.789318 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.789396 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-config-data\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.789427 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-db-sync-config-data\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.789762 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9qr7\" (UniqueName: \"kubernetes.io/projected/70245c64-f8a6-4309-ae8d-f55cb4227c3c-kube-api-access-p9qr7\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.891063 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.891112 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-config-data\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.891129 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-db-sync-config-data\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc 
kubenswrapper[4818]: I0930 17:26:33.891198 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9qr7\" (UniqueName: \"kubernetes.io/projected/70245c64-f8a6-4309-ae8d-f55cb4227c3c-kube-api-access-p9qr7\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.895942 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-db-sync-config-data\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.896572 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-config-data\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.904633 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:33 crc kubenswrapper[4818]: I0930 17:26:33.912474 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9qr7\" (UniqueName: \"kubernetes.io/projected/70245c64-f8a6-4309-ae8d-f55cb4227c3c-kube-api-access-p9qr7\") pod \"watcher-kuttl-db-sync-sdl6k\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:34 crc kubenswrapper[4818]: I0930 17:26:34.044346 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:34 crc kubenswrapper[4818]: I0930 17:26:34.572708 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k"] Sep 30 17:26:34 crc kubenswrapper[4818]: W0930 17:26:34.588759 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70245c64_f8a6_4309_ae8d_f55cb4227c3c.slice/crio-f4545f61ff094f33ccbfcd28b1e6627a0fee5c2fcc01d25efa49a582532a03e2 WatchSource:0}: Error finding container f4545f61ff094f33ccbfcd28b1e6627a0fee5c2fcc01d25efa49a582532a03e2: Status 404 returned error can't find the container with id f4545f61ff094f33ccbfcd28b1e6627a0fee5c2fcc01d25efa49a582532a03e2 Sep 30 17:26:34 crc kubenswrapper[4818]: I0930 17:26:34.982034 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" event={"ID":"70245c64-f8a6-4309-ae8d-f55cb4227c3c","Type":"ContainerStarted","Data":"eff88837534f86422a3581d1f5d7096f69fc81784a5310142265a94e5d9ad6cf"} Sep 30 17:26:34 crc kubenswrapper[4818]: I0930 17:26:34.982369 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" event={"ID":"70245c64-f8a6-4309-ae8d-f55cb4227c3c","Type":"ContainerStarted","Data":"f4545f61ff094f33ccbfcd28b1e6627a0fee5c2fcc01d25efa49a582532a03e2"} Sep 30 17:26:35 crc kubenswrapper[4818]: I0930 17:26:35.004536 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" podStartSLOduration=2.004519316 podStartE2EDuration="2.004519316s" podCreationTimestamp="2025-09-30 17:26:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:26:35.000537049 +0000 UTC m=+1641.754808885" watchObservedRunningTime="2025-09-30 17:26:35.004519316 +0000 UTC m=+1641.758791132" Sep 30 17:26:36 crc kubenswrapper[4818]: I0930 17:26:36.020386 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:26:36 crc kubenswrapper[4818]: E0930 17:26:36.020957 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:26:38 crc kubenswrapper[4818]: I0930 17:26:38.008559 4818 generic.go:334] "Generic (PLEG): container finished" podID="70245c64-f8a6-4309-ae8d-f55cb4227c3c" containerID="eff88837534f86422a3581d1f5d7096f69fc81784a5310142265a94e5d9ad6cf" exitCode=0 Sep 30 17:26:38 crc kubenswrapper[4818]: I0930 17:26:38.008636 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" event={"ID":"70245c64-f8a6-4309-ae8d-f55cb4227c3c","Type":"ContainerDied","Data":"eff88837534f86422a3581d1f5d7096f69fc81784a5310142265a94e5d9ad6cf"} Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.457200 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.583296 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-combined-ca-bundle\") pod \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.583358 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9qr7\" (UniqueName: \"kubernetes.io/projected/70245c64-f8a6-4309-ae8d-f55cb4227c3c-kube-api-access-p9qr7\") pod \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.583398 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-db-sync-config-data\") pod \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.583464 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-config-data\") pod \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\" (UID: \"70245c64-f8a6-4309-ae8d-f55cb4227c3c\") " Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.591104 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "70245c64-f8a6-4309-ae8d-f55cb4227c3c" (UID: "70245c64-f8a6-4309-ae8d-f55cb4227c3c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.591134 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70245c64-f8a6-4309-ae8d-f55cb4227c3c-kube-api-access-p9qr7" (OuterVolumeSpecName: "kube-api-access-p9qr7") pod "70245c64-f8a6-4309-ae8d-f55cb4227c3c" (UID: "70245c64-f8a6-4309-ae8d-f55cb4227c3c"). InnerVolumeSpecName "kube-api-access-p9qr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.619074 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "70245c64-f8a6-4309-ae8d-f55cb4227c3c" (UID: "70245c64-f8a6-4309-ae8d-f55cb4227c3c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.641114 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-config-data" (OuterVolumeSpecName: "config-data") pod "70245c64-f8a6-4309-ae8d-f55cb4227c3c" (UID: "70245c64-f8a6-4309-ae8d-f55cb4227c3c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.685318 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.685393 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9qr7\" (UniqueName: \"kubernetes.io/projected/70245c64-f8a6-4309-ae8d-f55cb4227c3c-kube-api-access-p9qr7\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.685462 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:39 crc kubenswrapper[4818]: I0930 17:26:39.685473 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70245c64-f8a6-4309-ae8d-f55cb4227c3c-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.035424 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.051238 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k" event={"ID":"70245c64-f8a6-4309-ae8d-f55cb4227c3c","Type":"ContainerDied","Data":"f4545f61ff094f33ccbfcd28b1e6627a0fee5c2fcc01d25efa49a582532a03e2"} Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.051280 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4545f61ff094f33ccbfcd28b1e6627a0fee5c2fcc01d25efa49a582532a03e2" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.291744 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:26:40 crc kubenswrapper[4818]: E0930 17:26:40.292103 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70245c64-f8a6-4309-ae8d-f55cb4227c3c" containerName="watcher-kuttl-db-sync" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.292122 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="70245c64-f8a6-4309-ae8d-f55cb4227c3c" containerName="watcher-kuttl-db-sync" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.292272 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="70245c64-f8a6-4309-ae8d-f55cb4227c3c" containerName="watcher-kuttl-db-sync" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.293316 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.295658 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-vzqvm" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.295878 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.311890 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.313308 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.319653 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.321228 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.325411 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.328528 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.339754 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.369822 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404549 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404586 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-726jq\" (UniqueName: \"kubernetes.io/projected/1147e46b-b24d-443a-87a8-681ad84ede4b-kube-api-access-726jq\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404605 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404710 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1147e46b-b24d-443a-87a8-681ad84ede4b-logs\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404745 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37a56683-e6d7-447b-9d7d-c40eef531ba0-logs\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404770 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlp92\" (UniqueName: \"kubernetes.io/projected/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-kube-api-access-mlp92\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc 
kubenswrapper[4818]: I0930 17:26:40.404818 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404847 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404866 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlsp5\" (UniqueName: \"kubernetes.io/projected/37a56683-e6d7-447b-9d7d-c40eef531ba0-kube-api-access-dlsp5\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404887 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.404914 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.405042 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.405088 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.405120 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.405141 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-config-data\") pod \"watcher-kuttl-api-0\" (UID: 
\"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.405163 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.405188 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.431870 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.433215 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.436458 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.438412 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507289 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507338 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkgrs\" (UniqueName: \"kubernetes.io/projected/9b2e56bc-7917-4cf5-83f0-4919fd154299-kube-api-access-rkgrs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507361 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507383 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507400 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlsp5\" (UniqueName: 
\"kubernetes.io/projected/37a56683-e6d7-447b-9d7d-c40eef531ba0-kube-api-access-dlsp5\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507420 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507444 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507468 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b2e56bc-7917-4cf5-83f0-4919fd154299-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507485 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507511 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507530 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507551 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507567 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507583 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507602 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507620 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507642 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507658 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-726jq\" (UniqueName: \"kubernetes.io/projected/1147e46b-b24d-443a-87a8-681ad84ede4b-kube-api-access-726jq\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507674 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507712 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1147e46b-b24d-443a-87a8-681ad84ede4b-logs\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507728 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507747 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37a56683-e6d7-447b-9d7d-c40eef531ba0-logs\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.507764 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlp92\" (UniqueName: 
\"kubernetes.io/projected/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-kube-api-access-mlp92\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.508390 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.509407 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1147e46b-b24d-443a-87a8-681ad84ede4b-logs\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.510161 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37a56683-e6d7-447b-9d7d-c40eef531ba0-logs\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.512765 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.512784 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.513194 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.520883 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.522754 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.522818 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.523316 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.523497 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.524013 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.524293 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-726jq\" (UniqueName: \"kubernetes.io/projected/1147e46b-b24d-443a-87a8-681ad84ede4b-kube-api-access-726jq\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.524401 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlp92\" (UniqueName: \"kubernetes.io/projected/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-kube-api-access-mlp92\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.524621 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.525895 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.526616 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlsp5\" (UniqueName: \"kubernetes.io/projected/37a56683-e6d7-447b-9d7d-c40eef531ba0-kube-api-access-dlsp5\") pod \"watcher-kuttl-api-0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.609636 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc 
kubenswrapper[4818]: I0930 17:26:40.610031 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.609806 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.610096 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkgrs\" (UniqueName: \"kubernetes.io/projected/9b2e56bc-7917-4cf5-83f0-4919fd154299-kube-api-access-rkgrs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.610122 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.610170 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b2e56bc-7917-4cf5-83f0-4919fd154299-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.610193 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.610831 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b2e56bc-7917-4cf5-83f0-4919fd154299-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.614481 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.618349 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.618716 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: 
\"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.626873 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.630626 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.632620 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkgrs\" (UniqueName: \"kubernetes.io/projected/9b2e56bc-7917-4cf5-83f0-4919fd154299-kube-api-access-rkgrs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.638946 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:40 crc kubenswrapper[4818]: I0930 17:26:40.750909 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:41 crc kubenswrapper[4818]: I0930 17:26:41.148378 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:26:41 crc kubenswrapper[4818]: I0930 17:26:41.288695 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:26:41 crc kubenswrapper[4818]: I0930 17:26:41.390998 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:26:41 crc kubenswrapper[4818]: I0930 17:26:41.512732 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.056018 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4b9b8c1d-e808-496e-9044-7b4b9f1d6416","Type":"ContainerStarted","Data":"787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.056485 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4b9b8c1d-e808-496e-9044-7b4b9f1d6416","Type":"ContainerStarted","Data":"3eba3a9fc5d20adc5023415d38f1e320f77a719b2ce4106fc41367fced848aa6"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.058272 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"9b2e56bc-7917-4cf5-83f0-4919fd154299","Type":"ContainerStarted","Data":"efbf3be4f5710efdb224f4ca9abed01a77e43dc1d971b46dc2be3f5373df2d7e"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.058303 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" 
event={"ID":"9b2e56bc-7917-4cf5-83f0-4919fd154299","Type":"ContainerStarted","Data":"b7065a79a4a4e975feb3a52a639c16061293094de9ed4c4f59e7e3b0a7824fd9"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.060439 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"37a56683-e6d7-447b-9d7d-c40eef531ba0","Type":"ContainerStarted","Data":"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.060548 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"37a56683-e6d7-447b-9d7d-c40eef531ba0","Type":"ContainerStarted","Data":"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.060651 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.060727 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"37a56683-e6d7-447b-9d7d-c40eef531ba0","Type":"ContainerStarted","Data":"4e427a2924d1aaabb697fcd7539e34f1f280270488712f376514942a80e59831"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.064039 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"1147e46b-b24d-443a-87a8-681ad84ede4b","Type":"ContainerStarted","Data":"ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.064097 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"1147e46b-b24d-443a-87a8-681ad84ede4b","Type":"ContainerStarted","Data":"05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.064127 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"1147e46b-b24d-443a-87a8-681ad84ede4b","Type":"ContainerStarted","Data":"1e29d00a70029bbe0cc0f2dba7782ac171236653f98da2d8cd82e4eaac0fdcbb"} Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.064529 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.071478 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.208:9322/\": dial tcp 10.217.0.208:9322: connect: connection refused" Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.078394 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.078369643 podStartE2EDuration="2.078369643s" podCreationTimestamp="2025-09-30 17:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:26:42.077460818 +0000 UTC m=+1648.831732654" watchObservedRunningTime="2025-09-30 17:26:42.078369643 +0000 UTC m=+1648.832641459" Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.117908 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
podStartSLOduration=2.117890402 podStartE2EDuration="2.117890402s" podCreationTimestamp="2025-09-30 17:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:26:42.117641305 +0000 UTC m=+1648.871913121" watchObservedRunningTime="2025-09-30 17:26:42.117890402 +0000 UTC m=+1648.872162218" Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.149034 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-1" podStartSLOduration=2.149013474 podStartE2EDuration="2.149013474s" podCreationTimestamp="2025-09-30 17:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:26:42.144404099 +0000 UTC m=+1648.898675925" watchObservedRunningTime="2025-09-30 17:26:42.149013474 +0000 UTC m=+1648.903285300" Sep 30 17:26:42 crc kubenswrapper[4818]: I0930 17:26:42.168861 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.16884144 podStartE2EDuration="2.16884144s" podCreationTimestamp="2025-09-30 17:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:26:42.163526846 +0000 UTC m=+1648.917798662" watchObservedRunningTime="2025-09-30 17:26:42.16884144 +0000 UTC m=+1648.923113256" Sep 30 17:26:44 crc kubenswrapper[4818]: I0930 17:26:44.382530 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:45 crc kubenswrapper[4818]: I0930 17:26:45.404969 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:45 crc kubenswrapper[4818]: I0930 17:26:45.610454 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:45 crc kubenswrapper[4818]: I0930 17:26:45.632091 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:45 crc kubenswrapper[4818]: I0930 17:26:45.640235 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:48 crc kubenswrapper[4818]: I0930 17:26:48.020579 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:26:48 crc kubenswrapper[4818]: E0930 17:26:48.021094 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.209266 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.611118 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 
17:26:50.618139 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.631482 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.636246 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.639765 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.679385 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.752173 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:50 crc kubenswrapper[4818]: I0930 17:26:50.779011 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:51 crc kubenswrapper[4818]: I0930 17:26:51.138826 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:51 crc kubenswrapper[4818]: I0930 17:26:51.145298 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:26:51 crc kubenswrapper[4818]: I0930 17:26:51.145555 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:26:51 crc kubenswrapper[4818]: I0930 17:26:51.171508 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:26:51 crc kubenswrapper[4818]: I0930 17:26:51.186583 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:26:52 crc kubenswrapper[4818]: I0930 17:26:52.260264 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:52 crc kubenswrapper[4818]: I0930 17:26:52.260906 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="proxy-httpd" containerID="cri-o://d787ea95c262090693fae9c417cc5000590e3b6310e3acc55658534893999ed2" gracePeriod=30 Sep 30 17:26:52 crc kubenswrapper[4818]: I0930 17:26:52.261015 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-notification-agent" containerID="cri-o://f94b002387872a7228db1803dcd048b31d6818707b3b92f3b53200b4c442977d" gracePeriod=30 Sep 30 17:26:52 crc kubenswrapper[4818]: I0930 17:26:52.260906 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="sg-core" containerID="cri-o://8a2cc36da3eb19462dcb9374bac730512a177bac6de66565f8024786849fd15b" gracePeriod=30 Sep 30 17:26:52 crc kubenswrapper[4818]: I0930 17:26:52.260792 4818 
kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-central-agent" containerID="cri-o://c391cbf046f64966d143acb6bb6926200c1f61fb7c54c064903e860ed59e4422" gracePeriod=30 Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165038 4818 generic.go:334] "Generic (PLEG): container finished" podID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerID="d787ea95c262090693fae9c417cc5000590e3b6310e3acc55658534893999ed2" exitCode=0 Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165447 4818 generic.go:334] "Generic (PLEG): container finished" podID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerID="8a2cc36da3eb19462dcb9374bac730512a177bac6de66565f8024786849fd15b" exitCode=2 Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165466 4818 generic.go:334] "Generic (PLEG): container finished" podID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerID="f94b002387872a7228db1803dcd048b31d6818707b3b92f3b53200b4c442977d" exitCode=0 Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165483 4818 generic.go:334] "Generic (PLEG): container finished" podID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerID="c391cbf046f64966d143acb6bb6926200c1f61fb7c54c064903e860ed59e4422" exitCode=0 Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165113 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerDied","Data":"d787ea95c262090693fae9c417cc5000590e3b6310e3acc55658534893999ed2"} Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165546 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerDied","Data":"8a2cc36da3eb19462dcb9374bac730512a177bac6de66565f8024786849fd15b"} Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165558 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerDied","Data":"f94b002387872a7228db1803dcd048b31d6818707b3b92f3b53200b4c442977d"} Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.165568 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerDied","Data":"c391cbf046f64966d143acb6bb6926200c1f61fb7c54c064903e860ed59e4422"} Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.437772 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542508 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-scripts\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542619 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-combined-ca-bundle\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542666 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-run-httpd\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542690 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-sg-core-conf-yaml\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542754 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-config-data\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542783 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-ceilometer-tls-certs\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542839 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-log-httpd\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.542874 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qt8z\" (UniqueName: \"kubernetes.io/projected/10b497f0-b54f-490a-bb1b-3fba7dee4217-kube-api-access-4qt8z\") pod \"10b497f0-b54f-490a-bb1b-3fba7dee4217\" (UID: \"10b497f0-b54f-490a-bb1b-3fba7dee4217\") " Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.545723 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.547209 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.548047 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-scripts" (OuterVolumeSpecName: "scripts") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.554175 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10b497f0-b54f-490a-bb1b-3fba7dee4217-kube-api-access-4qt8z" (OuterVolumeSpecName: "kube-api-access-4qt8z") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "kube-api-access-4qt8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.593088 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.593098 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.645305 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.646005 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.646032 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.646042 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.646052 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.646060 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.646069 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10b497f0-b54f-490a-bb1b-3fba7dee4217-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.646077 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qt8z\" (UniqueName: \"kubernetes.io/projected/10b497f0-b54f-490a-bb1b-3fba7dee4217-kube-api-access-4qt8z\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.679981 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-config-data" (OuterVolumeSpecName: "config-data") pod "10b497f0-b54f-490a-bb1b-3fba7dee4217" (UID: "10b497f0-b54f-490a-bb1b-3fba7dee4217"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:26:53 crc kubenswrapper[4818]: I0930 17:26:53.747868 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b497f0-b54f-490a-bb1b-3fba7dee4217-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.178411 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"10b497f0-b54f-490a-bb1b-3fba7dee4217","Type":"ContainerDied","Data":"d7977c4b356f494f51e6ada8643b0726a057603ae14d4034c95ac85a13520b1b"} Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.178464 4818 scope.go:117] "RemoveContainer" containerID="d787ea95c262090693fae9c417cc5000590e3b6310e3acc55658534893999ed2" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.178615 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.201673 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.206280 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.218465 4818 scope.go:117] "RemoveContainer" containerID="8a2cc36da3eb19462dcb9374bac730512a177bac6de66565f8024786849fd15b" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.226246 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:54 crc kubenswrapper[4818]: E0930 17:26:54.227255 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-central-agent" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227274 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-central-agent" Sep 30 17:26:54 crc kubenswrapper[4818]: E0930 17:26:54.227292 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="sg-core" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227299 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="sg-core" Sep 30 17:26:54 crc kubenswrapper[4818]: E0930 17:26:54.227307 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-notification-agent" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227313 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-notification-agent" Sep 30 17:26:54 crc kubenswrapper[4818]: E0930 17:26:54.227322 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="proxy-httpd" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227328 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="proxy-httpd" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227481 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-notification-agent" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227493 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="proxy-httpd" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227507 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="ceilometer-central-agent" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.227517 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" containerName="sg-core" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.229341 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.234006 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.234051 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.234161 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.240506 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.256664 4818 scope.go:117] "RemoveContainer" containerID="f94b002387872a7228db1803dcd048b31d6818707b3b92f3b53200b4c442977d" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.303877 4818 scope.go:117] "RemoveContainer" containerID="c391cbf046f64966d143acb6bb6926200c1f61fb7c54c064903e860ed59e4422" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356450 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f7tl\" (UniqueName: \"kubernetes.io/projected/3375b242-4a63-46f2-b281-c32c43250eec-kube-api-access-2f7tl\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356521 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356646 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356709 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-scripts\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356769 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-log-httpd\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356794 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356845 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-run-httpd\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.356900 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458262 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f7tl\" (UniqueName: \"kubernetes.io/projected/3375b242-4a63-46f2-b281-c32c43250eec-kube-api-access-2f7tl\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458337 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458373 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458404 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-scripts\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458440 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-log-httpd\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458463 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458499 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-run-httpd\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.458522 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: 
\"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.459065 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-log-httpd\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.459842 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-run-httpd\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.464802 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.465283 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-scripts\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.465349 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.468300 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.468721 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.479653 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f7tl\" (UniqueName: \"kubernetes.io/projected/3375b242-4a63-46f2-b281-c32c43250eec-kube-api-access-2f7tl\") pod \"ceilometer-0\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:54 crc kubenswrapper[4818]: I0930 17:26:54.582375 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:55 crc kubenswrapper[4818]: I0930 17:26:55.046615 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:26:55 crc kubenswrapper[4818]: W0930 17:26:55.052745 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3375b242_4a63_46f2_b281_c32c43250eec.slice/crio-8110f1b8d9bebfe7159c154cc6cec352bcf40795b1b7fced2c0f40eb40a96d1f WatchSource:0}: Error finding container 8110f1b8d9bebfe7159c154cc6cec352bcf40795b1b7fced2c0f40eb40a96d1f: Status 404 returned error can't find the container with id 8110f1b8d9bebfe7159c154cc6cec352bcf40795b1b7fced2c0f40eb40a96d1f Sep 30 17:26:55 crc kubenswrapper[4818]: I0930 17:26:55.188796 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerStarted","Data":"8110f1b8d9bebfe7159c154cc6cec352bcf40795b1b7fced2c0f40eb40a96d1f"} Sep 30 17:26:56 crc kubenswrapper[4818]: I0930 17:26:56.029473 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10b497f0-b54f-490a-bb1b-3fba7dee4217" path="/var/lib/kubelet/pods/10b497f0-b54f-490a-bb1b-3fba7dee4217/volumes" Sep 30 17:26:56 crc kubenswrapper[4818]: I0930 17:26:56.197676 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerStarted","Data":"e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c"} Sep 30 17:26:57 crc kubenswrapper[4818]: I0930 17:26:57.224474 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerStarted","Data":"f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3"} Sep 30 17:26:58 crc kubenswrapper[4818]: I0930 17:26:58.237688 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerStarted","Data":"936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6"} Sep 30 17:26:59 crc kubenswrapper[4818]: I0930 17:26:59.082886 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-db-create-s7x5r"] Sep 30 17:26:59 crc kubenswrapper[4818]: I0930 17:26:59.090819 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-db-create-s7x5r"] Sep 30 17:26:59 crc kubenswrapper[4818]: I0930 17:26:59.249380 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerStarted","Data":"0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337"} Sep 30 17:26:59 crc kubenswrapper[4818]: I0930 17:26:59.249615 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:26:59 crc kubenswrapper[4818]: I0930 17:26:59.283466 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.54684548 podStartE2EDuration="5.283439445s" podCreationTimestamp="2025-09-30 17:26:54 +0000 UTC" firstStartedPulling="2025-09-30 17:26:55.054862371 +0000 UTC m=+1661.809134187" lastFinishedPulling="2025-09-30 17:26:58.791456336 +0000 UTC m=+1665.545728152" 
observedRunningTime="2025-09-30 17:26:59.277566636 +0000 UTC m=+1666.031838492" watchObservedRunningTime="2025-09-30 17:26:59.283439445 +0000 UTC m=+1666.037711301" Sep 30 17:27:00 crc kubenswrapper[4818]: I0930 17:27:00.020818 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:27:00 crc kubenswrapper[4818]: E0930 17:27:00.021061 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:27:00 crc kubenswrapper[4818]: I0930 17:27:00.031393 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51ba880e-f6e2-4aff-99ea-268e68bc0b94" path="/var/lib/kubelet/pods/51ba880e-f6e2-4aff-99ea-268e68bc0b94/volumes" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.587668 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.590508 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.603800 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.706694 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-cert-memcached-mtls\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.706986 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-combined-ca-bundle\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.707187 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5tqm\" (UniqueName: \"kubernetes.io/projected/cd9dc024-5747-4924-8ee4-27cafb52e04e-kube-api-access-g5tqm\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.707260 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9dc024-5747-4924-8ee4-27cafb52e04e-logs\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.707301 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-custom-prometheus-ca\") pod \"watcher-kuttl-api-2\" (UID: 
\"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.707419 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-config-data\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.808363 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5tqm\" (UniqueName: \"kubernetes.io/projected/cd9dc024-5747-4924-8ee4-27cafb52e04e-kube-api-access-g5tqm\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.808703 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9dc024-5747-4924-8ee4-27cafb52e04e-logs\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.808824 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-custom-prometheus-ca\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.809595 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-config-data\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.809737 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-cert-memcached-mtls\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.809873 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-combined-ca-bundle\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.809061 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9dc024-5747-4924-8ee4-27cafb52e04e-logs\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.815211 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-custom-prometheus-ca\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc 
kubenswrapper[4818]: I0930 17:27:03.815229 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-combined-ca-bundle\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.818988 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-config-data\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.819508 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-cert-memcached-mtls\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.830855 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5tqm\" (UniqueName: \"kubernetes.io/projected/cd9dc024-5747-4924-8ee4-27cafb52e04e-kube-api-access-g5tqm\") pod \"watcher-kuttl-api-2\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:03 crc kubenswrapper[4818]: I0930 17:27:03.916319 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:04 crc kubenswrapper[4818]: I0930 17:27:04.527260 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Sep 30 17:27:04 crc kubenswrapper[4818]: W0930 17:27:04.548117 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd9dc024_5747_4924_8ee4_27cafb52e04e.slice/crio-63238a2370ab0bd0ee18996041fa967623914ba1b9697b2749dbb9d494c2d95f WatchSource:0}: Error finding container 63238a2370ab0bd0ee18996041fa967623914ba1b9697b2749dbb9d494c2d95f: Status 404 returned error can't find the container with id 63238a2370ab0bd0ee18996041fa967623914ba1b9697b2749dbb9d494c2d95f Sep 30 17:27:05 crc kubenswrapper[4818]: I0930 17:27:05.326029 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cd9dc024-5747-4924-8ee4-27cafb52e04e","Type":"ContainerStarted","Data":"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398"} Sep 30 17:27:05 crc kubenswrapper[4818]: I0930 17:27:05.326298 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cd9dc024-5747-4924-8ee4-27cafb52e04e","Type":"ContainerStarted","Data":"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a"} Sep 30 17:27:05 crc kubenswrapper[4818]: I0930 17:27:05.326309 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cd9dc024-5747-4924-8ee4-27cafb52e04e","Type":"ContainerStarted","Data":"63238a2370ab0bd0ee18996041fa967623914ba1b9697b2749dbb9d494c2d95f"} Sep 30 17:27:05 crc kubenswrapper[4818]: I0930 17:27:05.326325 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:05 crc 
kubenswrapper[4818]: I0930 17:27:05.348736 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-2" podStartSLOduration=2.348718927 podStartE2EDuration="2.348718927s" podCreationTimestamp="2025-09-30 17:27:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:27:05.343952198 +0000 UTC m=+1672.098224024" watchObservedRunningTime="2025-09-30 17:27:05.348718927 +0000 UTC m=+1672.102990733" Sep 30 17:27:07 crc kubenswrapper[4818]: I0930 17:27:07.492100 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:08 crc kubenswrapper[4818]: I0930 17:27:08.035690 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"] Sep 30 17:27:08 crc kubenswrapper[4818]: I0930 17:27:08.036323 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-ed0b-account-create-vrnmw"] Sep 30 17:27:08 crc kubenswrapper[4818]: I0930 17:27:08.917135 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:10 crc kubenswrapper[4818]: I0930 17:27:10.037628 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b072c26-0e09-43f2-ad65-698dcdc5cd4b" path="/var/lib/kubelet/pods/8b072c26-0e09-43f2-ad65-698dcdc5cd4b/volumes" Sep 30 17:27:13 crc kubenswrapper[4818]: I0930 17:27:13.916960 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:13 crc kubenswrapper[4818]: I0930 17:27:13.925512 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:14 crc kubenswrapper[4818]: I0930 17:27:14.032254 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:27:14 crc kubenswrapper[4818]: E0930 17:27:14.033536 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:27:14 crc kubenswrapper[4818]: I0930 17:27:14.418993 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.118292 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.146511 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.146776 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-kuttl-api-log" containerID="cri-o://05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4" gracePeriod=30 Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.147219 4818 
kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-api" containerID="cri-o://ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2" gracePeriod=30 Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.422366 4818 generic.go:334] "Generic (PLEG): container finished" podID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerID="05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4" exitCode=143 Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.423426 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"1147e46b-b24d-443a-87a8-681ad84ede4b","Type":"ContainerDied","Data":"05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4"} Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.640201 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.208:9322/\": read tcp 10.217.0.2:36168->10.217.0.208:9322: read: connection reset by peer" Sep 30 17:27:15 crc kubenswrapper[4818]: I0930 17:27:15.641507 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.208:9322/\": read tcp 10.217.0.2:36170->10.217.0.208:9322: read: connection reset by peer" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.085716 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.235542 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-config-data\") pod \"1147e46b-b24d-443a-87a8-681ad84ede4b\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.235667 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1147e46b-b24d-443a-87a8-681ad84ede4b-logs\") pod \"1147e46b-b24d-443a-87a8-681ad84ede4b\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.235706 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-custom-prometheus-ca\") pod \"1147e46b-b24d-443a-87a8-681ad84ede4b\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.235802 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-combined-ca-bundle\") pod \"1147e46b-b24d-443a-87a8-681ad84ede4b\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.235901 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-726jq\" (UniqueName: \"kubernetes.io/projected/1147e46b-b24d-443a-87a8-681ad84ede4b-kube-api-access-726jq\") pod 
\"1147e46b-b24d-443a-87a8-681ad84ede4b\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.235963 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-cert-memcached-mtls\") pod \"1147e46b-b24d-443a-87a8-681ad84ede4b\" (UID: \"1147e46b-b24d-443a-87a8-681ad84ede4b\") " Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.238903 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1147e46b-b24d-443a-87a8-681ad84ede4b-logs" (OuterVolumeSpecName: "logs") pod "1147e46b-b24d-443a-87a8-681ad84ede4b" (UID: "1147e46b-b24d-443a-87a8-681ad84ede4b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.241566 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1147e46b-b24d-443a-87a8-681ad84ede4b-kube-api-access-726jq" (OuterVolumeSpecName: "kube-api-access-726jq") pod "1147e46b-b24d-443a-87a8-681ad84ede4b" (UID: "1147e46b-b24d-443a-87a8-681ad84ede4b"). InnerVolumeSpecName "kube-api-access-726jq". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.261135 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "1147e46b-b24d-443a-87a8-681ad84ede4b" (UID: "1147e46b-b24d-443a-87a8-681ad84ede4b"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.267221 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1147e46b-b24d-443a-87a8-681ad84ede4b" (UID: "1147e46b-b24d-443a-87a8-681ad84ede4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.287486 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-config-data" (OuterVolumeSpecName: "config-data") pod "1147e46b-b24d-443a-87a8-681ad84ede4b" (UID: "1147e46b-b24d-443a-87a8-681ad84ede4b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.316114 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "1147e46b-b24d-443a-87a8-681ad84ede4b" (UID: "1147e46b-b24d-443a-87a8-681ad84ede4b"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.339692 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.339739 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1147e46b-b24d-443a-87a8-681ad84ede4b-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.339751 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.339765 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.339776 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-726jq\" (UniqueName: \"kubernetes.io/projected/1147e46b-b24d-443a-87a8-681ad84ede4b-kube-api-access-726jq\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.339787 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1147e46b-b24d-443a-87a8-681ad84ede4b-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.433978 4818 generic.go:334] "Generic (PLEG): container finished" podID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerID="ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2" exitCode=0 Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.434033 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"1147e46b-b24d-443a-87a8-681ad84ede4b","Type":"ContainerDied","Data":"ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2"} Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.434083 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"1147e46b-b24d-443a-87a8-681ad84ede4b","Type":"ContainerDied","Data":"1e29d00a70029bbe0cc0f2dba7782ac171236653f98da2d8cd82e4eaac0fdcbb"} Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.434107 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.434133 4818 scope.go:117] "RemoveContainer" containerID="ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.434277 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-2" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-kuttl-api-log" containerID="cri-o://a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a" gracePeriod=30 Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.434328 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-2" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-api" containerID="cri-o://3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398" gracePeriod=30 Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.459403 4818 scope.go:117] "RemoveContainer" containerID="05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.485606 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.493267 4818 scope.go:117] "RemoveContainer" containerID="ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2" Sep 30 17:27:16 crc kubenswrapper[4818]: E0930 17:27:16.493936 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2\": container with ID starting with ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2 not found: ID does not exist" containerID="ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.493968 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2"} err="failed to get container status \"ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2\": rpc error: code = NotFound desc = could not find container \"ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2\": container with ID starting with ac7a5f8ed02a9d0af9790e1e2a2cfdc2a801bcabeecdf1a7d0fa36e99d20c7b2 not found: ID does not exist" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.493992 4818 scope.go:117] "RemoveContainer" containerID="05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4" Sep 30 17:27:16 crc kubenswrapper[4818]: E0930 17:27:16.495112 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4\": container with ID starting with 05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4 not found: ID does not exist" containerID="05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.495149 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4"} err="failed to get container status \"05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4\": rpc 
error: code = NotFound desc = could not find container \"05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4\": container with ID starting with 05ee844c617c97210c54f16c38ad391be5d8e4f2b8b897853832927ed84fa6d4 not found: ID does not exist" Sep 30 17:27:16 crc kubenswrapper[4818]: I0930 17:27:16.498716 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.411687 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.457409 4818 generic.go:334] "Generic (PLEG): container finished" podID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerID="3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398" exitCode=0 Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.457452 4818 generic.go:334] "Generic (PLEG): container finished" podID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerID="a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a" exitCode=143 Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.457505 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cd9dc024-5747-4924-8ee4-27cafb52e04e","Type":"ContainerDied","Data":"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398"} Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.457537 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cd9dc024-5747-4924-8ee4-27cafb52e04e","Type":"ContainerDied","Data":"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a"} Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.457553 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cd9dc024-5747-4924-8ee4-27cafb52e04e","Type":"ContainerDied","Data":"63238a2370ab0bd0ee18996041fa967623914ba1b9697b2749dbb9d494c2d95f"} Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.457579 4818 scope.go:117] "RemoveContainer" containerID="3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.457881 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.503626 4818 scope.go:117] "RemoveContainer" containerID="a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.545118 4818 scope.go:117] "RemoveContainer" containerID="3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398" Sep 30 17:27:17 crc kubenswrapper[4818]: E0930 17:27:17.545593 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398\": container with ID starting with 3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398 not found: ID does not exist" containerID="3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.545652 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398"} err="failed to get container status \"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398\": rpc error: code = NotFound desc = could not find container \"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398\": container with ID starting with 3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398 not found: ID does not exist" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.545673 4818 scope.go:117] "RemoveContainer" containerID="a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a" Sep 30 17:27:17 crc kubenswrapper[4818]: E0930 17:27:17.545835 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a\": container with ID starting with a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a not found: ID does not exist" containerID="a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.545860 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a"} err="failed to get container status \"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a\": rpc error: code = NotFound desc = could not find container \"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a\": container with ID starting with a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a not found: ID does not exist" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.545873 4818 scope.go:117] "RemoveContainer" containerID="3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.546059 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398"} err="failed to get container status \"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398\": rpc error: code = NotFound desc = could not find container \"3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398\": container with ID starting with 3e51ce7f7fb01a667b3d0daad4057cdbd6646e2d99fd8f71f6c72f6659662398 not found: ID does not exist" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 
17:27:17.546076 4818 scope.go:117] "RemoveContainer" containerID="a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.552831 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a"} err="failed to get container status \"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a\": rpc error: code = NotFound desc = could not find container \"a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a\": container with ID starting with a8af0098b08afc22a609fe284c39530a59b4893e3105b1bad3ca27263f2f9c6a not found: ID does not exist" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.562481 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5tqm\" (UniqueName: \"kubernetes.io/projected/cd9dc024-5747-4924-8ee4-27cafb52e04e-kube-api-access-g5tqm\") pod \"cd9dc024-5747-4924-8ee4-27cafb52e04e\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.562535 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9dc024-5747-4924-8ee4-27cafb52e04e-logs\") pod \"cd9dc024-5747-4924-8ee4-27cafb52e04e\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.562591 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-config-data\") pod \"cd9dc024-5747-4924-8ee4-27cafb52e04e\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.562620 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-cert-memcached-mtls\") pod \"cd9dc024-5747-4924-8ee4-27cafb52e04e\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.562752 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-combined-ca-bundle\") pod \"cd9dc024-5747-4924-8ee4-27cafb52e04e\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.562805 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-custom-prometheus-ca\") pod \"cd9dc024-5747-4924-8ee4-27cafb52e04e\" (UID: \"cd9dc024-5747-4924-8ee4-27cafb52e04e\") " Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.583305 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd9dc024-5747-4924-8ee4-27cafb52e04e-logs" (OuterVolumeSpecName: "logs") pod "cd9dc024-5747-4924-8ee4-27cafb52e04e" (UID: "cd9dc024-5747-4924-8ee4-27cafb52e04e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.586773 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd9dc024-5747-4924-8ee4-27cafb52e04e-kube-api-access-g5tqm" (OuterVolumeSpecName: "kube-api-access-g5tqm") pod "cd9dc024-5747-4924-8ee4-27cafb52e04e" (UID: "cd9dc024-5747-4924-8ee4-27cafb52e04e"). InnerVolumeSpecName "kube-api-access-g5tqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.626128 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "cd9dc024-5747-4924-8ee4-27cafb52e04e" (UID: "cd9dc024-5747-4924-8ee4-27cafb52e04e"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.630102 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd9dc024-5747-4924-8ee4-27cafb52e04e" (UID: "cd9dc024-5747-4924-8ee4-27cafb52e04e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.673852 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5tqm\" (UniqueName: \"kubernetes.io/projected/cd9dc024-5747-4924-8ee4-27cafb52e04e-kube-api-access-g5tqm\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.673885 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9dc024-5747-4924-8ee4-27cafb52e04e-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.673895 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.673905 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.687073 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "cd9dc024-5747-4924-8ee4-27cafb52e04e" (UID: "cd9dc024-5747-4924-8ee4-27cafb52e04e"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.715599 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-config-data" (OuterVolumeSpecName: "config-data") pod "cd9dc024-5747-4924-8ee4-27cafb52e04e" (UID: "cd9dc024-5747-4924-8ee4-27cafb52e04e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.775408 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.775554 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cd9dc024-5747-4924-8ee4-27cafb52e04e-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.788201 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Sep 30 17:27:17 crc kubenswrapper[4818]: I0930 17:27:17.797292 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Sep 30 17:27:18 crc kubenswrapper[4818]: I0930 17:27:18.034829 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" path="/var/lib/kubelet/pods/1147e46b-b24d-443a-87a8-681ad84ede4b/volumes" Sep 30 17:27:18 crc kubenswrapper[4818]: I0930 17:27:18.036286 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" path="/var/lib/kubelet/pods/cd9dc024-5747-4924-8ee4-27cafb52e04e/volumes" Sep 30 17:27:18 crc kubenswrapper[4818]: I0930 17:27:18.386765 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:27:18 crc kubenswrapper[4818]: I0930 17:27:18.387137 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-kuttl-api-log" containerID="cri-o://ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be" gracePeriod=30 Sep 30 17:27:18 crc kubenswrapper[4818]: I0930 17:27:18.387241 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-api" containerID="cri-o://bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba" gracePeriod=30 Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.301399 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.404870 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-cert-memcached-mtls\") pod \"37a56683-e6d7-447b-9d7d-c40eef531ba0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.405057 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-combined-ca-bundle\") pod \"37a56683-e6d7-447b-9d7d-c40eef531ba0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.405115 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-config-data\") pod \"37a56683-e6d7-447b-9d7d-c40eef531ba0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.405217 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37a56683-e6d7-447b-9d7d-c40eef531ba0-logs\") pod \"37a56683-e6d7-447b-9d7d-c40eef531ba0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.405306 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-custom-prometheus-ca\") pod \"37a56683-e6d7-447b-9d7d-c40eef531ba0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.405351 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlsp5\" (UniqueName: \"kubernetes.io/projected/37a56683-e6d7-447b-9d7d-c40eef531ba0-kube-api-access-dlsp5\") pod \"37a56683-e6d7-447b-9d7d-c40eef531ba0\" (UID: \"37a56683-e6d7-447b-9d7d-c40eef531ba0\") " Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.405624 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37a56683-e6d7-447b-9d7d-c40eef531ba0-logs" (OuterVolumeSpecName: "logs") pod "37a56683-e6d7-447b-9d7d-c40eef531ba0" (UID: "37a56683-e6d7-447b-9d7d-c40eef531ba0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.405964 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37a56683-e6d7-447b-9d7d-c40eef531ba0-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.409005 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37a56683-e6d7-447b-9d7d-c40eef531ba0-kube-api-access-dlsp5" (OuterVolumeSpecName: "kube-api-access-dlsp5") pod "37a56683-e6d7-447b-9d7d-c40eef531ba0" (UID: "37a56683-e6d7-447b-9d7d-c40eef531ba0"). InnerVolumeSpecName "kube-api-access-dlsp5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.452422 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "37a56683-e6d7-447b-9d7d-c40eef531ba0" (UID: "37a56683-e6d7-447b-9d7d-c40eef531ba0"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.456572 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-config-data" (OuterVolumeSpecName: "config-data") pod "37a56683-e6d7-447b-9d7d-c40eef531ba0" (UID: "37a56683-e6d7-447b-9d7d-c40eef531ba0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.470469 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37a56683-e6d7-447b-9d7d-c40eef531ba0" (UID: "37a56683-e6d7-447b-9d7d-c40eef531ba0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.480775 4818 generic.go:334] "Generic (PLEG): container finished" podID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerID="bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba" exitCode=0 Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.480815 4818 generic.go:334] "Generic (PLEG): container finished" podID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerID="ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be" exitCode=143 Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.480839 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"37a56683-e6d7-447b-9d7d-c40eef531ba0","Type":"ContainerDied","Data":"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba"} Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.480871 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"37a56683-e6d7-447b-9d7d-c40eef531ba0","Type":"ContainerDied","Data":"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be"} Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.480883 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"37a56683-e6d7-447b-9d7d-c40eef531ba0","Type":"ContainerDied","Data":"4e427a2924d1aaabb697fcd7539e34f1f280270488712f376514942a80e59831"} Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.480902 4818 scope.go:117] "RemoveContainer" containerID="bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.481088 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.503933 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "37a56683-e6d7-447b-9d7d-c40eef531ba0" (UID: "37a56683-e6d7-447b-9d7d-c40eef531ba0"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.507739 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.507784 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.507803 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlsp5\" (UniqueName: \"kubernetes.io/projected/37a56683-e6d7-447b-9d7d-c40eef531ba0-kube-api-access-dlsp5\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.507817 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.507835 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a56683-e6d7-447b-9d7d-c40eef531ba0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.567323 4818 scope.go:117] "RemoveContainer" containerID="ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.590373 4818 scope.go:117] "RemoveContainer" containerID="bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba" Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.593521 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba\": container with ID starting with bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba not found: ID does not exist" containerID="bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.593561 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba"} err="failed to get container status \"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba\": rpc error: code = NotFound desc = could not find container \"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba\": container with ID starting with bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba not found: ID does not exist" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.593588 4818 scope.go:117] "RemoveContainer" containerID="ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be" Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.597454 4818 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be\": container with ID starting with ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be not found: ID does not exist" containerID="ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.597498 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be"} err="failed to get container status \"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be\": rpc error: code = NotFound desc = could not find container \"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be\": container with ID starting with ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be not found: ID does not exist" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.597528 4818 scope.go:117] "RemoveContainer" containerID="bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.599044 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba"} err="failed to get container status \"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba\": rpc error: code = NotFound desc = could not find container \"bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba\": container with ID starting with bb2696e1f00194567a2b0562aa02d0ab7d2a855665d8b5ba864180a61e5b64ba not found: ID does not exist" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.599068 4818 scope.go:117] "RemoveContainer" containerID="ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.599339 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be"} err="failed to get container status \"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be\": rpc error: code = NotFound desc = could not find container \"ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be\": container with ID starting with ddb95daad0361e724f8fdcbcf0c91717b276af5faaa75b84ac7c2993710eb6be not found: ID does not exist" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.632705 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.646300 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-sdl6k"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.683172 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.683669 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="4b9b8c1d-e808-496e-9044-7b4b9f1d6416" containerName="watcher-applier" containerID="cri-o://787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb" gracePeriod=30 Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.721941 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.722149 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="9b2e56bc-7917-4cf5-83f0-4919fd154299" containerName="watcher-decision-engine" containerID="cri-o://efbf3be4f5710efdb224f4ca9abed01a77e43dc1d971b46dc2be3f5373df2d7e" gracePeriod=30 Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.752545 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher8f08-account-delete-r4vvp"] Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.752867 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.752883 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.752904 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.752911 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.752937 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.752943 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.752951 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.752957 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.752967 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.752973 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.752987 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.752993 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.753120 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.753130 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.753137 4818 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.753146 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" containerName="watcher-api" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.753162 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd9dc024-5747-4924-8ee4-27cafb52e04e" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.753171 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1147e46b-b24d-443a-87a8-681ad84ede4b" containerName="watcher-kuttl-api-log" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.753723 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.764369 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher8f08-account-delete-r4vvp"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.800781 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-6lqgb"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.836654 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-6lqgb"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.853520 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-8f08-account-create-fq6nv"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.867194 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-8f08-account-create-fq6nv"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.880082 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher8f08-account-delete-r4vvp"] Sep 30 17:27:19 crc kubenswrapper[4818]: E0930 17:27:19.880804 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-xcn5d], unattached volumes=[], failed to process volumes=[]: context canceled" pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" podUID="b8b8df42-ca37-4307-87d8-224b21cac3c8" Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.887342 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.893803 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:27:19 crc kubenswrapper[4818]: I0930 17:27:19.916636 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcn5d\" (UniqueName: \"kubernetes.io/projected/b8b8df42-ca37-4307-87d8-224b21cac3c8-kube-api-access-xcn5d\") pod \"watcher8f08-account-delete-r4vvp\" (UID: \"b8b8df42-ca37-4307-87d8-224b21cac3c8\") " pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.018565 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcn5d\" (UniqueName: \"kubernetes.io/projected/b8b8df42-ca37-4307-87d8-224b21cac3c8-kube-api-access-xcn5d\") pod \"watcher8f08-account-delete-r4vvp\" (UID: \"b8b8df42-ca37-4307-87d8-224b21cac3c8\") " pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 
17:27:20.036406 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37a56683-e6d7-447b-9d7d-c40eef531ba0" path="/var/lib/kubelet/pods/37a56683-e6d7-447b-9d7d-c40eef531ba0/volumes" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.037269 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3948095a-c90c-4ad2-85cf-2d269bd00bb8" path="/var/lib/kubelet/pods/3948095a-c90c-4ad2-85cf-2d269bd00bb8/volumes" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.038038 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="408f4ce8-e091-4a55-8597-40cc51e3082e" path="/var/lib/kubelet/pods/408f4ce8-e091-4a55-8597-40cc51e3082e/volumes" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.038985 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70245c64-f8a6-4309-ae8d-f55cb4227c3c" path="/var/lib/kubelet/pods/70245c64-f8a6-4309-ae8d-f55cb4227c3c/volumes" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.048958 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcn5d\" (UniqueName: \"kubernetes.io/projected/b8b8df42-ca37-4307-87d8-224b21cac3c8-kube-api-access-xcn5d\") pod \"watcher8f08-account-delete-r4vvp\" (UID: \"b8b8df42-ca37-4307-87d8-224b21cac3c8\") " pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.489668 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.499650 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.627499 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcn5d\" (UniqueName: \"kubernetes.io/projected/b8b8df42-ca37-4307-87d8-224b21cac3c8-kube-api-access-xcn5d\") pod \"b8b8df42-ca37-4307-87d8-224b21cac3c8\" (UID: \"b8b8df42-ca37-4307-87d8-224b21cac3c8\") " Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.634104 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8b8df42-ca37-4307-87d8-224b21cac3c8-kube-api-access-xcn5d" (OuterVolumeSpecName: "kube-api-access-xcn5d") pod "b8b8df42-ca37-4307-87d8-224b21cac3c8" (UID: "b8b8df42-ca37-4307-87d8-224b21cac3c8"). InnerVolumeSpecName "kube-api-access-xcn5d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:20 crc kubenswrapper[4818]: E0930 17:27:20.643244 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:27:20 crc kubenswrapper[4818]: E0930 17:27:20.645230 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:27:20 crc kubenswrapper[4818]: E0930 17:27:20.647293 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:27:20 crc kubenswrapper[4818]: E0930 17:27:20.647351 4818 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="4b9b8c1d-e808-496e-9044-7b4b9f1d6416" containerName="watcher-applier" Sep 30 17:27:20 crc kubenswrapper[4818]: I0930 17:27:20.729598 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcn5d\" (UniqueName: \"kubernetes.io/projected/b8b8df42-ca37-4307-87d8-224b21cac3c8-kube-api-access-xcn5d\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.496134 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher8f08-account-delete-r4vvp" Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.531907 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher8f08-account-delete-r4vvp"] Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.537643 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher8f08-account-delete-r4vvp"] Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.968544 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.968862 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-central-agent" containerID="cri-o://e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c" gracePeriod=30 Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.969057 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-notification-agent" containerID="cri-o://f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3" gracePeriod=30 Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.969086 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="sg-core" containerID="cri-o://936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6" gracePeriod=30 Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.969138 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="proxy-httpd" containerID="cri-o://0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337" gracePeriod=30 Sep 30 17:27:21 crc kubenswrapper[4818]: I0930 17:27:21.978582 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.211:3000/\": read tcp 10.217.0.2:51872->10.217.0.211:3000: read: connection reset by peer" Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.030405 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8b8df42-ca37-4307-87d8-224b21cac3c8" path="/var/lib/kubelet/pods/b8b8df42-ca37-4307-87d8-224b21cac3c8/volumes" Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.519976 4818 generic.go:334] "Generic (PLEG): container finished" podID="3375b242-4a63-46f2-b281-c32c43250eec" containerID="0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337" exitCode=0 Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.520047 4818 generic.go:334] "Generic (PLEG): container finished" podID="3375b242-4a63-46f2-b281-c32c43250eec" containerID="936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6" exitCode=2 Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.520065 4818 generic.go:334] "Generic (PLEG): container finished" podID="3375b242-4a63-46f2-b281-c32c43250eec" containerID="e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c" exitCode=0 Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.520089 4818 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerDied","Data":"0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337"} Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.520117 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerDied","Data":"936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6"} Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.520129 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerDied","Data":"e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c"} Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.579216 4818 scope.go:117] "RemoveContainer" containerID="901d3131056e4ad8f7198b406c9332ccc9b826cec978732ee5fe607166a8a5cd" Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.595825 4818 scope.go:117] "RemoveContainer" containerID="9ca1f0c21f686e7a5eba0904588292a1bc8fdf00120ffc0b01aa6a7e9a3c8387" Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.631019 4818 scope.go:117] "RemoveContainer" containerID="42b98b75e653a76209ca45f43fa8d2cc49b3c58d6cc238a15a499d9ca8978f8c" Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.754464 4818 scope.go:117] "RemoveContainer" containerID="590de73a9cd7bafa85597524569c55276179a60f02b0a802fcd2cd96f92db4f3" Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.776385 4818 scope.go:117] "RemoveContainer" containerID="419db01ba387a3cd4de9cfa9061c88c02c19f6944f32028949ecfc1cef57fd4d" Sep 30 17:27:22 crc kubenswrapper[4818]: I0930 17:27:22.982595 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065099 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065200 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f7tl\" (UniqueName: \"kubernetes.io/projected/3375b242-4a63-46f2-b281-c32c43250eec-kube-api-access-2f7tl\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065220 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-scripts\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065253 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-ceilometer-tls-certs\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065303 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-log-httpd\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065327 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-combined-ca-bundle\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065347 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-run-httpd\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.065403 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-sg-core-conf-yaml\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.066768 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.066993 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.072144 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-scripts" (OuterVolumeSpecName: "scripts") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.075520 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3375b242-4a63-46f2-b281-c32c43250eec-kube-api-access-2f7tl" (OuterVolumeSpecName: "kube-api-access-2f7tl") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "kube-api-access-2f7tl". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.112571 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.129476 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.144164 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.168137 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data" (OuterVolumeSpecName: "config-data") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.168600 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data\") pod \"3375b242-4a63-46f2-b281-c32c43250eec\" (UID: \"3375b242-4a63-46f2-b281-c32c43250eec\") " Sep 30 17:27:23 crc kubenswrapper[4818]: W0930 17:27:23.168896 4818 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/3375b242-4a63-46f2-b281-c32c43250eec/volumes/kubernetes.io~secret/config-data Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.168932 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data" (OuterVolumeSpecName: "config-data") pod "3375b242-4a63-46f2-b281-c32c43250eec" (UID: "3375b242-4a63-46f2-b281-c32c43250eec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169313 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169336 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f7tl\" (UniqueName: \"kubernetes.io/projected/3375b242-4a63-46f2-b281-c32c43250eec-kube-api-access-2f7tl\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169347 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169356 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169384 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169395 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169404 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3375b242-4a63-46f2-b281-c32c43250eec-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.169411 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3375b242-4a63-46f2-b281-c32c43250eec-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.532131 4818 generic.go:334] "Generic (PLEG): container finished" podID="3375b242-4a63-46f2-b281-c32c43250eec" containerID="f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3" exitCode=0 Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.532207 4818 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.532214 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerDied","Data":"f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3"} Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.532602 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3375b242-4a63-46f2-b281-c32c43250eec","Type":"ContainerDied","Data":"8110f1b8d9bebfe7159c154cc6cec352bcf40795b1b7fced2c0f40eb40a96d1f"} Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.532626 4818 scope.go:117] "RemoveContainer" containerID="0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.557393 4818 scope.go:117] "RemoveContainer" containerID="936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.583008 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.594664 4818 scope.go:117] "RemoveContainer" containerID="f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.602642 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.610843 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.611350 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="sg-core" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.611376 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="sg-core" Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.611410 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="proxy-httpd" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.611417 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="proxy-httpd" Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.611427 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-notification-agent" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.611434 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-notification-agent" Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.611441 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-central-agent" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.611447 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-central-agent" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.611621 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-central-agent" Sep 30 17:27:23 
crc kubenswrapper[4818]: I0930 17:27:23.611644 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="ceilometer-notification-agent" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.611653 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="sg-core" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.611672 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3375b242-4a63-46f2-b281-c32c43250eec" containerName="proxy-httpd" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.614645 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.621157 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.621398 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.623206 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.636288 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.665324 4818 scope.go:117] "RemoveContainer" containerID="e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.711513 4818 scope.go:117] "RemoveContainer" containerID="0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337" Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.729852 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337\": container with ID starting with 0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337 not found: ID does not exist" containerID="0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.729936 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337"} err="failed to get container status \"0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337\": rpc error: code = NotFound desc = could not find container \"0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337\": container with ID starting with 0968d1f49de3a18f2c51127117f90ac26819fd1810fbcefbc13d74741504e337 not found: ID does not exist" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.729960 4818 scope.go:117] "RemoveContainer" containerID="936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6" Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.733267 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6\": container with ID starting with 936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6 not found: ID does not exist" containerID="936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6" Sep 30 17:27:23 crc 
kubenswrapper[4818]: I0930 17:27:23.733311 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6"} err="failed to get container status \"936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6\": rpc error: code = NotFound desc = could not find container \"936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6\": container with ID starting with 936a4469ed9e77bde8c10cdcef8842c3b0aaa9a0b3531d5a28ac59042e1a3df6 not found: ID does not exist" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.733337 4818 scope.go:117] "RemoveContainer" containerID="f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3" Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.733683 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3\": container with ID starting with f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3 not found: ID does not exist" containerID="f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.733709 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3"} err="failed to get container status \"f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3\": rpc error: code = NotFound desc = could not find container \"f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3\": container with ID starting with f85c0ab1a3f0489fe78a14b55241c2768126c1d80bcfa54cb7e2af7c83dc8ec3 not found: ID does not exist" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.733723 4818 scope.go:117] "RemoveContainer" containerID="e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c" Sep 30 17:27:23 crc kubenswrapper[4818]: E0930 17:27:23.733902 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c\": container with ID starting with e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c not found: ID does not exist" containerID="e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.733941 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c"} err="failed to get container status \"e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c\": rpc error: code = NotFound desc = could not find container \"e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c\": container with ID starting with e1f12d1d5c008853e4252ac8d3aebb747337daa597364e55a1821b86604e091c not found: ID does not exist" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.788666 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-log-httpd\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.788718 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhrb8\" (UniqueName: \"kubernetes.io/projected/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-kube-api-access-bhrb8\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.788868 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.789094 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.789157 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-scripts\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.789224 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-config-data\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.789267 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.789314 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-run-httpd\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890401 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890477 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890502 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-scripts\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890537 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-config-data\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890556 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890580 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-run-httpd\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890608 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-log-httpd\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.890627 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhrb8\" (UniqueName: \"kubernetes.io/projected/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-kube-api-access-bhrb8\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.891425 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-run-httpd\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.892266 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-log-httpd\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.894056 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.894086 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.895456 4818 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-scripts\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.898538 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-config-data\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.910591 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhrb8\" (UniqueName: \"kubernetes.io/projected/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-kube-api-access-bhrb8\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.920776 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:23 crc kubenswrapper[4818]: I0930 17:27:23.942322 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.035553 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3375b242-4a63-46f2-b281-c32c43250eec" path="/var/lib/kubelet/pods/3375b242-4a63-46f2-b281-c32c43250eec/volumes" Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.439849 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:27:24 crc kubenswrapper[4818]: W0930 17:27:24.462621 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbf0aa0b_7066_4b1e_9a82_3ba1c8698799.slice/crio-3e39e2fa66341c2652d198e0863d4b41ff1186627c9f7017458574b84113190d WatchSource:0}: Error finding container 3e39e2fa66341c2652d198e0863d4b41ff1186627c9f7017458574b84113190d: Status 404 returned error can't find the container with id 3e39e2fa66341c2652d198e0863d4b41ff1186627c9f7017458574b84113190d Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.548044 4818 generic.go:334] "Generic (PLEG): container finished" podID="4b9b8c1d-e808-496e-9044-7b4b9f1d6416" containerID="787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb" exitCode=0 Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.548090 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4b9b8c1d-e808-496e-9044-7b4b9f1d6416","Type":"ContainerDied","Data":"787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb"} Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.549120 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerStarted","Data":"3e39e2fa66341c2652d198e0863d4b41ff1186627c9f7017458574b84113190d"} Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.808998 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.907567 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlp92\" (UniqueName: \"kubernetes.io/projected/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-kube-api-access-mlp92\") pod \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.907626 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-combined-ca-bundle\") pod \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.907650 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-config-data\") pod \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.907693 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-logs\") pod \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.907742 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-cert-memcached-mtls\") pod \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\" (UID: \"4b9b8c1d-e808-496e-9044-7b4b9f1d6416\") " Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.909229 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-logs" (OuterVolumeSpecName: "logs") pod "4b9b8c1d-e808-496e-9044-7b4b9f1d6416" (UID: "4b9b8c1d-e808-496e-9044-7b4b9f1d6416"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.914129 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-kube-api-access-mlp92" (OuterVolumeSpecName: "kube-api-access-mlp92") pod "4b9b8c1d-e808-496e-9044-7b4b9f1d6416" (UID: "4b9b8c1d-e808-496e-9044-7b4b9f1d6416"). InnerVolumeSpecName "kube-api-access-mlp92". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.931391 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b9b8c1d-e808-496e-9044-7b4b9f1d6416" (UID: "4b9b8c1d-e808-496e-9044-7b4b9f1d6416"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.952245 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-config-data" (OuterVolumeSpecName: "config-data") pod "4b9b8c1d-e808-496e-9044-7b4b9f1d6416" (UID: "4b9b8c1d-e808-496e-9044-7b4b9f1d6416"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:24 crc kubenswrapper[4818]: I0930 17:27:24.967038 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "4b9b8c1d-e808-496e-9044-7b4b9f1d6416" (UID: "4b9b8c1d-e808-496e-9044-7b4b9f1d6416"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.010229 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlp92\" (UniqueName: \"kubernetes.io/projected/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-kube-api-access-mlp92\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.010551 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.010565 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.010576 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.010586 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4b9b8c1d-e808-496e-9044-7b4b9f1d6416-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.021361 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:27:25 crc kubenswrapper[4818]: E0930 17:27:25.021610 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.562778 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"4b9b8c1d-e808-496e-9044-7b4b9f1d6416","Type":"ContainerDied","Data":"3eba3a9fc5d20adc5023415d38f1e320f77a719b2ce4106fc41367fced848aa6"} Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.563030 4818 scope.go:117] "RemoveContainer" containerID="787eb9a9e23b0d31fa4f24ef42a9edbb369229c0b1d17ca3fc0d5a5b32fd6ceb" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.563154 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.576981 4818 generic.go:334] "Generic (PLEG): container finished" podID="9b2e56bc-7917-4cf5-83f0-4919fd154299" containerID="efbf3be4f5710efdb224f4ca9abed01a77e43dc1d971b46dc2be3f5373df2d7e" exitCode=0 Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.577069 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"9b2e56bc-7917-4cf5-83f0-4919fd154299","Type":"ContainerDied","Data":"efbf3be4f5710efdb224f4ca9abed01a77e43dc1d971b46dc2be3f5373df2d7e"} Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.580307 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerStarted","Data":"672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0"} Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.616997 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.621688 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.775843 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.932826 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-custom-prometheus-ca\") pod \"9b2e56bc-7917-4cf5-83f0-4919fd154299\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.932951 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-combined-ca-bundle\") pod \"9b2e56bc-7917-4cf5-83f0-4919fd154299\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.933003 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b2e56bc-7917-4cf5-83f0-4919fd154299-logs\") pod \"9b2e56bc-7917-4cf5-83f0-4919fd154299\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.933142 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-config-data\") pod \"9b2e56bc-7917-4cf5-83f0-4919fd154299\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.933177 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkgrs\" (UniqueName: \"kubernetes.io/projected/9b2e56bc-7917-4cf5-83f0-4919fd154299-kube-api-access-rkgrs\") pod \"9b2e56bc-7917-4cf5-83f0-4919fd154299\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.933198 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-cert-memcached-mtls\") 
pod \"9b2e56bc-7917-4cf5-83f0-4919fd154299\" (UID: \"9b2e56bc-7917-4cf5-83f0-4919fd154299\") " Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.934135 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b2e56bc-7917-4cf5-83f0-4919fd154299-logs" (OuterVolumeSpecName: "logs") pod "9b2e56bc-7917-4cf5-83f0-4919fd154299" (UID: "9b2e56bc-7917-4cf5-83f0-4919fd154299"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.938482 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b2e56bc-7917-4cf5-83f0-4919fd154299-kube-api-access-rkgrs" (OuterVolumeSpecName: "kube-api-access-rkgrs") pod "9b2e56bc-7917-4cf5-83f0-4919fd154299" (UID: "9b2e56bc-7917-4cf5-83f0-4919fd154299"). InnerVolumeSpecName "kube-api-access-rkgrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.974795 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b2e56bc-7917-4cf5-83f0-4919fd154299" (UID: "9b2e56bc-7917-4cf5-83f0-4919fd154299"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:25 crc kubenswrapper[4818]: I0930 17:27:25.992690 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "9b2e56bc-7917-4cf5-83f0-4919fd154299" (UID: "9b2e56bc-7917-4cf5-83f0-4919fd154299"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.026073 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-config-data" (OuterVolumeSpecName: "config-data") pod "9b2e56bc-7917-4cf5-83f0-4919fd154299" (UID: "9b2e56bc-7917-4cf5-83f0-4919fd154299"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.034514 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b9b8c1d-e808-496e-9044-7b4b9f1d6416" path="/var/lib/kubelet/pods/4b9b8c1d-e808-496e-9044-7b4b9f1d6416/volumes" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.034798 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.034827 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkgrs\" (UniqueName: \"kubernetes.io/projected/9b2e56bc-7917-4cf5-83f0-4919fd154299-kube-api-access-rkgrs\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.034840 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.034851 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.034862 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b2e56bc-7917-4cf5-83f0-4919fd154299-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.049183 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "9b2e56bc-7917-4cf5-83f0-4919fd154299" (UID: "9b2e56bc-7917-4cf5-83f0-4919fd154299"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.135889 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9b2e56bc-7917-4cf5-83f0-4919fd154299-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.592284 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"9b2e56bc-7917-4cf5-83f0-4919fd154299","Type":"ContainerDied","Data":"b7065a79a4a4e975feb3a52a639c16061293094de9ed4c4f59e7e3b0a7824fd9"} Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.592359 4818 scope.go:117] "RemoveContainer" containerID="efbf3be4f5710efdb224f4ca9abed01a77e43dc1d971b46dc2be3f5373df2d7e" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.592370 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.594528 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerStarted","Data":"6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4"} Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.635070 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:27:26 crc kubenswrapper[4818]: I0930 17:27:26.639479 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:27:27 crc kubenswrapper[4818]: I0930 17:27:27.604894 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerStarted","Data":"9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d"} Sep 30 17:27:28 crc kubenswrapper[4818]: I0930 17:27:28.032995 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b2e56bc-7917-4cf5-83f0-4919fd154299" path="/var/lib/kubelet/pods/9b2e56bc-7917-4cf5-83f0-4919fd154299/volumes" Sep 30 17:27:28 crc kubenswrapper[4818]: I0930 17:27:28.638660 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerStarted","Data":"57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1"} Sep 30 17:27:28 crc kubenswrapper[4818]: I0930 17:27:28.639061 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:28 crc kubenswrapper[4818]: I0930 17:27:28.671758 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.8041618069999998 podStartE2EDuration="5.671739255s" podCreationTimestamp="2025-09-30 17:27:23 +0000 UTC" firstStartedPulling="2025-09-30 17:27:24.473618725 +0000 UTC m=+1691.227890561" lastFinishedPulling="2025-09-30 17:27:28.341196193 +0000 UTC m=+1695.095468009" observedRunningTime="2025-09-30 17:27:28.668536339 +0000 UTC m=+1695.422808175" watchObservedRunningTime="2025-09-30 17:27:28.671739255 +0000 UTC m=+1695.426011071" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.111172 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-psdzk"] Sep 30 17:27:29 crc kubenswrapper[4818]: E0930 17:27:29.111560 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2e56bc-7917-4cf5-83f0-4919fd154299" containerName="watcher-decision-engine" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.111577 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2e56bc-7917-4cf5-83f0-4919fd154299" containerName="watcher-decision-engine" Sep 30 17:27:29 crc kubenswrapper[4818]: E0930 17:27:29.111585 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b9b8c1d-e808-496e-9044-7b4b9f1d6416" containerName="watcher-applier" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.111592 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b9b8c1d-e808-496e-9044-7b4b9f1d6416" containerName="watcher-applier" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.111747 4818 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9b2e56bc-7917-4cf5-83f0-4919fd154299" containerName="watcher-decision-engine" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.111782 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b9b8c1d-e808-496e-9044-7b4b9f1d6416" containerName="watcher-applier" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.112373 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-psdzk" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.122075 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-psdzk"] Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.180448 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd5lh\" (UniqueName: \"kubernetes.io/projected/de9cb9c1-0637-4973-8294-8fad4c871099-kube-api-access-rd5lh\") pod \"watcher-db-create-psdzk\" (UID: \"de9cb9c1-0637-4973-8294-8fad4c871099\") " pod="watcher-kuttl-default/watcher-db-create-psdzk" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.281448 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd5lh\" (UniqueName: \"kubernetes.io/projected/de9cb9c1-0637-4973-8294-8fad4c871099-kube-api-access-rd5lh\") pod \"watcher-db-create-psdzk\" (UID: \"de9cb9c1-0637-4973-8294-8fad4c871099\") " pod="watcher-kuttl-default/watcher-db-create-psdzk" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.300297 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd5lh\" (UniqueName: \"kubernetes.io/projected/de9cb9c1-0637-4973-8294-8fad4c871099-kube-api-access-rd5lh\") pod \"watcher-db-create-psdzk\" (UID: \"de9cb9c1-0637-4973-8294-8fad4c871099\") " pod="watcher-kuttl-default/watcher-db-create-psdzk" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.427409 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-psdzk" Sep 30 17:27:29 crc kubenswrapper[4818]: I0930 17:27:29.690793 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-psdzk"] Sep 30 17:27:30 crc kubenswrapper[4818]: I0930 17:27:30.662901 4818 generic.go:334] "Generic (PLEG): container finished" podID="de9cb9c1-0637-4973-8294-8fad4c871099" containerID="13cb3a5ce920b756ed9cf3d9e0a29318cccafdc4ed3ca8ec8fd6a6251d2b3256" exitCode=0 Sep 30 17:27:30 crc kubenswrapper[4818]: I0930 17:27:30.662968 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-psdzk" event={"ID":"de9cb9c1-0637-4973-8294-8fad4c871099","Type":"ContainerDied","Data":"13cb3a5ce920b756ed9cf3d9e0a29318cccafdc4ed3ca8ec8fd6a6251d2b3256"} Sep 30 17:27:30 crc kubenswrapper[4818]: I0930 17:27:30.663218 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-psdzk" event={"ID":"de9cb9c1-0637-4973-8294-8fad4c871099","Type":"ContainerStarted","Data":"4910243f57fa8269dcf636deb70673a718beb71fda44974e8a39806abe3bb1f2"} Sep 30 17:27:32 crc kubenswrapper[4818]: I0930 17:27:32.155793 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-psdzk" Sep 30 17:27:32 crc kubenswrapper[4818]: I0930 17:27:32.257810 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rd5lh\" (UniqueName: \"kubernetes.io/projected/de9cb9c1-0637-4973-8294-8fad4c871099-kube-api-access-rd5lh\") pod \"de9cb9c1-0637-4973-8294-8fad4c871099\" (UID: \"de9cb9c1-0637-4973-8294-8fad4c871099\") " Sep 30 17:27:32 crc kubenswrapper[4818]: I0930 17:27:32.263087 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de9cb9c1-0637-4973-8294-8fad4c871099-kube-api-access-rd5lh" (OuterVolumeSpecName: "kube-api-access-rd5lh") pod "de9cb9c1-0637-4973-8294-8fad4c871099" (UID: "de9cb9c1-0637-4973-8294-8fad4c871099"). InnerVolumeSpecName "kube-api-access-rd5lh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:32 crc kubenswrapper[4818]: I0930 17:27:32.359178 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rd5lh\" (UniqueName: \"kubernetes.io/projected/de9cb9c1-0637-4973-8294-8fad4c871099-kube-api-access-rd5lh\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:32 crc kubenswrapper[4818]: I0930 17:27:32.688997 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-psdzk" event={"ID":"de9cb9c1-0637-4973-8294-8fad4c871099","Type":"ContainerDied","Data":"4910243f57fa8269dcf636deb70673a718beb71fda44974e8a39806abe3bb1f2"} Sep 30 17:27:32 crc kubenswrapper[4818]: I0930 17:27:32.689053 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4910243f57fa8269dcf636deb70673a718beb71fda44974e8a39806abe3bb1f2" Sep 30 17:27:32 crc kubenswrapper[4818]: I0930 17:27:32.689125 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-psdzk" Sep 30 17:27:36 crc kubenswrapper[4818]: I0930 17:27:36.020772 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:27:36 crc kubenswrapper[4818]: E0930 17:27:36.021510 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.037040 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-6vgps"] Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.044947 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-6vgps"] Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.096581 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-a500-account-create-jjkfw"] Sep 30 17:27:39 crc kubenswrapper[4818]: E0930 17:27:39.096940 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de9cb9c1-0637-4973-8294-8fad4c871099" containerName="mariadb-database-create" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.096956 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="de9cb9c1-0637-4973-8294-8fad4c871099" containerName="mariadb-database-create" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.101154 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="de9cb9c1-0637-4973-8294-8fad4c871099" containerName="mariadb-database-create" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.101704 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.103491 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.115798 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-a500-account-create-jjkfw"] Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.278681 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bv4q\" (UniqueName: \"kubernetes.io/projected/7b23764d-cc98-4adc-8770-583784a65fc8-kube-api-access-8bv4q\") pod \"watcher-a500-account-create-jjkfw\" (UID: \"7b23764d-cc98-4adc-8770-583784a65fc8\") " pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.379985 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bv4q\" (UniqueName: \"kubernetes.io/projected/7b23764d-cc98-4adc-8770-583784a65fc8-kube-api-access-8bv4q\") pod \"watcher-a500-account-create-jjkfw\" (UID: \"7b23764d-cc98-4adc-8770-583784a65fc8\") " pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.408696 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bv4q\" (UniqueName: \"kubernetes.io/projected/7b23764d-cc98-4adc-8770-583784a65fc8-kube-api-access-8bv4q\") pod \"watcher-a500-account-create-jjkfw\" (UID: \"7b23764d-cc98-4adc-8770-583784a65fc8\") " pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.441166 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" Sep 30 17:27:39 crc kubenswrapper[4818]: I0930 17:27:39.909710 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-a500-account-create-jjkfw"] Sep 30 17:27:39 crc kubenswrapper[4818]: W0930 17:27:39.918126 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b23764d_cc98_4adc_8770_583784a65fc8.slice/crio-64581bf82d4c5997894665b9268d99592ac0cf73e02421235473b72bacd3ed01 WatchSource:0}: Error finding container 64581bf82d4c5997894665b9268d99592ac0cf73e02421235473b72bacd3ed01: Status 404 returned error can't find the container with id 64581bf82d4c5997894665b9268d99592ac0cf73e02421235473b72bacd3ed01 Sep 30 17:27:40 crc kubenswrapper[4818]: I0930 17:27:40.036689 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a440f3f4-3dbb-4e92-96a6-1ed1bde159c2" path="/var/lib/kubelet/pods/a440f3f4-3dbb-4e92-96a6-1ed1bde159c2/volumes" Sep 30 17:27:40 crc kubenswrapper[4818]: I0930 17:27:40.770968 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" event={"ID":"7b23764d-cc98-4adc-8770-583784a65fc8","Type":"ContainerDied","Data":"28ad73224de683d5b1daf56faeb3e739184d5088b3f6632e4258dc5843069878"} Sep 30 17:27:40 crc kubenswrapper[4818]: I0930 17:27:40.770901 4818 generic.go:334] "Generic (PLEG): container finished" podID="7b23764d-cc98-4adc-8770-583784a65fc8" containerID="28ad73224de683d5b1daf56faeb3e739184d5088b3f6632e4258dc5843069878" exitCode=0 Sep 30 17:27:40 crc kubenswrapper[4818]: I0930 17:27:40.771077 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" event={"ID":"7b23764d-cc98-4adc-8770-583784a65fc8","Type":"ContainerStarted","Data":"64581bf82d4c5997894665b9268d99592ac0cf73e02421235473b72bacd3ed01"} Sep 30 17:27:42 crc kubenswrapper[4818]: I0930 17:27:42.270645 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" Sep 30 17:27:42 crc kubenswrapper[4818]: I0930 17:27:42.426884 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bv4q\" (UniqueName: \"kubernetes.io/projected/7b23764d-cc98-4adc-8770-583784a65fc8-kube-api-access-8bv4q\") pod \"7b23764d-cc98-4adc-8770-583784a65fc8\" (UID: \"7b23764d-cc98-4adc-8770-583784a65fc8\") " Sep 30 17:27:42 crc kubenswrapper[4818]: I0930 17:27:42.436740 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b23764d-cc98-4adc-8770-583784a65fc8-kube-api-access-8bv4q" (OuterVolumeSpecName: "kube-api-access-8bv4q") pod "7b23764d-cc98-4adc-8770-583784a65fc8" (UID: "7b23764d-cc98-4adc-8770-583784a65fc8"). InnerVolumeSpecName "kube-api-access-8bv4q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:42 crc kubenswrapper[4818]: I0930 17:27:42.529452 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bv4q\" (UniqueName: \"kubernetes.io/projected/7b23764d-cc98-4adc-8770-583784a65fc8-kube-api-access-8bv4q\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:42 crc kubenswrapper[4818]: I0930 17:27:42.787744 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" event={"ID":"7b23764d-cc98-4adc-8770-583784a65fc8","Type":"ContainerDied","Data":"64581bf82d4c5997894665b9268d99592ac0cf73e02421235473b72bacd3ed01"} Sep 30 17:27:42 crc kubenswrapper[4818]: I0930 17:27:42.787994 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64581bf82d4c5997894665b9268d99592ac0cf73e02421235473b72bacd3ed01" Sep 30 17:27:42 crc kubenswrapper[4818]: I0930 17:27:42.787782 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-a500-account-create-jjkfw" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.355485 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cd928"] Sep 30 17:27:44 crc kubenswrapper[4818]: E0930 17:27:44.357705 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b23764d-cc98-4adc-8770-583784a65fc8" containerName="mariadb-account-create" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.357736 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b23764d-cc98-4adc-8770-583784a65fc8" containerName="mariadb-account-create" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.357946 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b23764d-cc98-4adc-8770-583784a65fc8" containerName="mariadb-account-create" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.358627 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.360312 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.360472 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-x2524" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.372645 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cd928"] Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.432239 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.432294 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-config-data\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.432446 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqnrd\" (UniqueName: \"kubernetes.io/projected/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-kube-api-access-gqnrd\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.432619 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-db-sync-config-data\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.534403 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-db-sync-config-data\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.534504 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.534551 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-config-data\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc 
kubenswrapper[4818]: I0930 17:27:44.534619 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqnrd\" (UniqueName: \"kubernetes.io/projected/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-kube-api-access-gqnrd\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.540751 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-config-data\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.546519 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.557687 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-db-sync-config-data\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.558035 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqnrd\" (UniqueName: \"kubernetes.io/projected/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-kube-api-access-gqnrd\") pod \"watcher-kuttl-db-sync-cd928\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:44 crc kubenswrapper[4818]: I0930 17:27:44.678031 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:45 crc kubenswrapper[4818]: I0930 17:27:45.144242 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cd928"] Sep 30 17:27:45 crc kubenswrapper[4818]: W0930 17:27:45.149546 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e4b8482_5f36_4a55_847e_c9a6c9ba65a5.slice/crio-7ae22af55a0beac6dba0d31d92c099daedc1da2b94a4527245c74051b2e71ba8 WatchSource:0}: Error finding container 7ae22af55a0beac6dba0d31d92c099daedc1da2b94a4527245c74051b2e71ba8: Status 404 returned error can't find the container with id 7ae22af55a0beac6dba0d31d92c099daedc1da2b94a4527245c74051b2e71ba8 Sep 30 17:27:45 crc kubenswrapper[4818]: I0930 17:27:45.815778 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" event={"ID":"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5","Type":"ContainerStarted","Data":"7258583eae528003d18118957a309983e8d5ba6f32e766408183c96837cb2398"} Sep 30 17:27:45 crc kubenswrapper[4818]: I0930 17:27:45.816061 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" event={"ID":"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5","Type":"ContainerStarted","Data":"7ae22af55a0beac6dba0d31d92c099daedc1da2b94a4527245c74051b2e71ba8"} Sep 30 17:27:45 crc kubenswrapper[4818]: I0930 17:27:45.837308 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" podStartSLOduration=1.837290978 podStartE2EDuration="1.837290978s" podCreationTimestamp="2025-09-30 17:27:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:27:45.835656514 +0000 UTC m=+1712.589928340" watchObservedRunningTime="2025-09-30 17:27:45.837290978 +0000 UTC m=+1712.591562794" Sep 30 17:27:47 crc kubenswrapper[4818]: I0930 17:27:47.832326 4818 generic.go:334] "Generic (PLEG): container finished" podID="6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" containerID="7258583eae528003d18118957a309983e8d5ba6f32e766408183c96837cb2398" exitCode=0 Sep 30 17:27:47 crc kubenswrapper[4818]: I0930 17:27:47.832431 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" event={"ID":"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5","Type":"ContainerDied","Data":"7258583eae528003d18118957a309983e8d5ba6f32e766408183c96837cb2398"} Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.020304 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:27:49 crc kubenswrapper[4818]: E0930 17:27:49.031342 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.227455 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.309654 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-combined-ca-bundle\") pod \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.309703 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-config-data\") pod \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.309744 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqnrd\" (UniqueName: \"kubernetes.io/projected/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-kube-api-access-gqnrd\") pod \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.309768 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-db-sync-config-data\") pod \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\" (UID: \"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5\") " Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.314663 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-kube-api-access-gqnrd" (OuterVolumeSpecName: "kube-api-access-gqnrd") pod "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" (UID: "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5"). InnerVolumeSpecName "kube-api-access-gqnrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.316099 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" (UID: "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.333460 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" (UID: "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.355856 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-config-data" (OuterVolumeSpecName: "config-data") pod "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" (UID: "6e4b8482-5f36-4a55-847e-c9a6c9ba65a5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.411180 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.411206 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.411216 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqnrd\" (UniqueName: \"kubernetes.io/projected/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-kube-api-access-gqnrd\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.411224 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.849459 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" event={"ID":"6e4b8482-5f36-4a55-847e-c9a6c9ba65a5","Type":"ContainerDied","Data":"7ae22af55a0beac6dba0d31d92c099daedc1da2b94a4527245c74051b2e71ba8"} Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.849497 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ae22af55a0beac6dba0d31d92c099daedc1da2b94a4527245c74051b2e71ba8" Sep 30 17:27:49 crc kubenswrapper[4818]: I0930 17:27:49.849508 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-cd928" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.100186 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:27:50 crc kubenswrapper[4818]: E0930 17:27:50.100561 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" containerName="watcher-kuttl-db-sync" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.100573 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" containerName="watcher-kuttl-db-sync" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.100727 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" containerName="watcher-kuttl-db-sync" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.101597 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.105332 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-x2524" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.105873 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.107842 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.108886 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.111983 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.127516 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.131533 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.143455 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.144383 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.148076 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.164418 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225029 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225074 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7cc2\" (UniqueName: \"kubernetes.io/projected/a703aca6-ed46-4141-8cfb-e5565daec9f4-kube-api-access-f7cc2\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225094 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a703aca6-ed46-4141-8cfb-e5565daec9f4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225114 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225131 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225251 4818 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225339 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225408 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225440 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw62m\" (UniqueName: \"kubernetes.io/projected/c748315b-87b0-4452-afb3-268ac343bbce-kube-api-access-gw62m\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225501 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225614 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225649 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfmc5\" (UniqueName: \"kubernetes.io/projected/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-kube-api-access-jfmc5\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225754 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c748315b-87b0-4452-afb3-268ac343bbce-logs\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225797 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: 
\"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225844 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225865 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.225906 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.326956 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c748315b-87b0-4452-afb3-268ac343bbce-logs\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.326993 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327136 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327157 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327173 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327208 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: 
\"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327227 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7cc2\" (UniqueName: \"kubernetes.io/projected/a703aca6-ed46-4141-8cfb-e5565daec9f4-kube-api-access-f7cc2\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327242 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a703aca6-ed46-4141-8cfb-e5565daec9f4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327259 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327274 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327298 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327321 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327349 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327365 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw62m\" (UniqueName: \"kubernetes.io/projected/c748315b-87b0-4452-afb3-268ac343bbce-kube-api-access-gw62m\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327379 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-config-data\") pod 
\"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327402 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327400 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c748315b-87b0-4452-afb3-268ac343bbce-logs\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.327418 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfmc5\" (UniqueName: \"kubernetes.io/projected/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-kube-api-access-jfmc5\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.328299 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.328988 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a703aca6-ed46-4141-8cfb-e5565daec9f4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.331472 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.333674 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.342298 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.342435 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " 
pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.342487 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.342745 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.342787 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.342848 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.344702 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.347482 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.349534 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7cc2\" (UniqueName: \"kubernetes.io/projected/a703aca6-ed46-4141-8cfb-e5565daec9f4-kube-api-access-f7cc2\") pod \"watcher-kuttl-applier-0\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.349678 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.349982 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfmc5\" (UniqueName: \"kubernetes.io/projected/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-kube-api-access-jfmc5\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " 
pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.350635 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw62m\" (UniqueName: \"kubernetes.io/projected/c748315b-87b0-4452-afb3-268ac343bbce-kube-api-access-gw62m\") pod \"watcher-kuttl-api-0\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.422048 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.432275 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:27:50 crc kubenswrapper[4818]: I0930 17:27:50.461161 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.076136 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.164222 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.193692 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.863734 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23","Type":"ContainerStarted","Data":"bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b"} Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.864068 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23","Type":"ContainerStarted","Data":"cab820a90ee7eaba644e027b1ba3a9aeb7574a1c57be7339d0b12bcb0c732b08"} Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.866985 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"c748315b-87b0-4452-afb3-268ac343bbce","Type":"ContainerStarted","Data":"75bec3ab40967464e3ec8ee9356f3f1769a0927d7e7dcf51b77530b18f62109f"} Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.867030 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"c748315b-87b0-4452-afb3-268ac343bbce","Type":"ContainerStarted","Data":"5aa56bc2c06eb9479b9cdff31ffb50628bb171342b32c3a0e850c3a7a65c04b1"} Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.867042 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"c748315b-87b0-4452-afb3-268ac343bbce","Type":"ContainerStarted","Data":"720c8ead865a78e9450baa76d90c78b04bb20ab7bf45edf8ba130c74f832a242"} Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.867843 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.869202 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" 
event={"ID":"a703aca6-ed46-4141-8cfb-e5565daec9f4","Type":"ContainerStarted","Data":"30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92"} Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.869255 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a703aca6-ed46-4141-8cfb-e5565daec9f4","Type":"ContainerStarted","Data":"04cba75518053006eb92be2bbda1e8e9b27fe3629c9f4331fe2527398c8ae1c5"} Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.882154 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=1.882097927 podStartE2EDuration="1.882097927s" podCreationTimestamp="2025-09-30 17:27:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:27:51.879293881 +0000 UTC m=+1718.633565697" watchObservedRunningTime="2025-09-30 17:27:51.882097927 +0000 UTC m=+1718.636369743" Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.903402 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=1.9033851529999999 podStartE2EDuration="1.903385153s" podCreationTimestamp="2025-09-30 17:27:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:27:51.900492535 +0000 UTC m=+1718.654764361" watchObservedRunningTime="2025-09-30 17:27:51.903385153 +0000 UTC m=+1718.657656969" Sep 30 17:27:51 crc kubenswrapper[4818]: I0930 17:27:51.932428 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=1.932406198 podStartE2EDuration="1.932406198s" podCreationTimestamp="2025-09-30 17:27:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:27:51.929053837 +0000 UTC m=+1718.683325653" watchObservedRunningTime="2025-09-30 17:27:51.932406198 +0000 UTC m=+1718.686678014" Sep 30 17:27:53 crc kubenswrapper[4818]: I0930 17:27:53.889773 4818 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 30 17:27:53 crc kubenswrapper[4818]: I0930 17:27:53.951643 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:27:54 crc kubenswrapper[4818]: I0930 17:27:54.168854 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:55 crc kubenswrapper[4818]: I0930 17:27:55.423270 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:27:55 crc kubenswrapper[4818]: I0930 17:27:55.432807 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:00 crc kubenswrapper[4818]: I0930 17:28:00.423172 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:00 crc kubenswrapper[4818]: I0930 17:28:00.428845 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:00 crc kubenswrapper[4818]: I0930 17:28:00.432788 4818 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:00 crc kubenswrapper[4818]: I0930 17:28:00.461684 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:00 crc kubenswrapper[4818]: I0930 17:28:00.477113 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:00 crc kubenswrapper[4818]: I0930 17:28:00.503740 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:00 crc kubenswrapper[4818]: I0930 17:28:00.984721 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:01 crc kubenswrapper[4818]: I0930 17:28:01.053760 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:01 crc kubenswrapper[4818]: I0930 17:28:01.063400 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:01 crc kubenswrapper[4818]: I0930 17:28:01.066289 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:02 crc kubenswrapper[4818]: I0930 17:28:02.214637 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:02 crc kubenswrapper[4818]: I0930 17:28:02.215513 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-central-agent" containerID="cri-o://672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0" gracePeriod=30 Sep 30 17:28:02 crc kubenswrapper[4818]: I0930 17:28:02.215597 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="sg-core" containerID="cri-o://9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d" gracePeriod=30 Sep 30 17:28:02 crc kubenswrapper[4818]: I0930 17:28:02.215580 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="proxy-httpd" containerID="cri-o://57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1" gracePeriod=30 Sep 30 17:28:02 crc kubenswrapper[4818]: I0930 17:28:02.215667 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-notification-agent" containerID="cri-o://6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4" gracePeriod=30 Sep 30 17:28:03 crc kubenswrapper[4818]: I0930 17:28:03.005855 4818 generic.go:334] "Generic (PLEG): container finished" podID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerID="57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1" exitCode=0 Sep 30 17:28:03 crc kubenswrapper[4818]: I0930 17:28:03.006293 4818 generic.go:334] "Generic (PLEG): container finished" podID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerID="9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d" 
exitCode=2 Sep 30 17:28:03 crc kubenswrapper[4818]: I0930 17:28:03.006301 4818 generic.go:334] "Generic (PLEG): container finished" podID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerID="672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0" exitCode=0 Sep 30 17:28:03 crc kubenswrapper[4818]: I0930 17:28:03.005989 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerDied","Data":"57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1"} Sep 30 17:28:03 crc kubenswrapper[4818]: I0930 17:28:03.006421 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerDied","Data":"9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d"} Sep 30 17:28:03 crc kubenswrapper[4818]: I0930 17:28:03.006440 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerDied","Data":"672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0"} Sep 30 17:28:03 crc kubenswrapper[4818]: I0930 17:28:03.021075 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:28:03 crc kubenswrapper[4818]: E0930 17:28:03.021506 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.715343 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cd928"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.722736 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-cd928"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.753910 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watchera500-account-delete-z9hn2"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.754960 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.765487 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watchera500-account-delete-z9hn2"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.789462 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.789670 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" containerName="watcher-decision-engine" containerID="cri-o://bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b" gracePeriod=30 Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.806601 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-psdzk"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.829747 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-a500-account-create-jjkfw"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.848515 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watchera500-account-delete-z9hn2"] Sep 30 17:28:08 crc kubenswrapper[4818]: E0930 17:28:08.849212 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-j4psc], unattached volumes=[], failed to process volumes=[]: context canceled" pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" podUID="90809275-11d8-49b5-8a9b-2797b95657b8" Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.850647 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4psc\" (UniqueName: \"kubernetes.io/projected/90809275-11d8-49b5-8a9b-2797b95657b8-kube-api-access-j4psc\") pod \"watchera500-account-delete-z9hn2\" (UID: \"90809275-11d8-49b5-8a9b-2797b95657b8\") " pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.853986 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-psdzk"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.860717 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-a500-account-create-jjkfw"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.870022 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.870285 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-kuttl-api-log" containerID="cri-o://5aa56bc2c06eb9479b9cdff31ffb50628bb171342b32c3a0e850c3a7a65c04b1" gracePeriod=30 Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.870468 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-api" containerID="cri-o://75bec3ab40967464e3ec8ee9356f3f1769a0927d7e7dcf51b77530b18f62109f" gracePeriod=30 Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.902707 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.902943 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="a703aca6-ed46-4141-8cfb-e5565daec9f4" containerName="watcher-applier" containerID="cri-o://30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92" gracePeriod=30 Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.952296 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4psc\" (UniqueName: \"kubernetes.io/projected/90809275-11d8-49b5-8a9b-2797b95657b8-kube-api-access-j4psc\") pod \"watchera500-account-delete-z9hn2\" (UID: \"90809275-11d8-49b5-8a9b-2797b95657b8\") " pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" Sep 30 17:28:08 crc kubenswrapper[4818]: I0930 17:28:08.975794 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4psc\" (UniqueName: \"kubernetes.io/projected/90809275-11d8-49b5-8a9b-2797b95657b8-kube-api-access-j4psc\") pod \"watchera500-account-delete-z9hn2\" (UID: \"90809275-11d8-49b5-8a9b-2797b95657b8\") " pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.058519 4818 generic.go:334] "Generic (PLEG): container finished" podID="c748315b-87b0-4452-afb3-268ac343bbce" containerID="5aa56bc2c06eb9479b9cdff31ffb50628bb171342b32c3a0e850c3a7a65c04b1" exitCode=143 Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.058842 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.059371 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"c748315b-87b0-4452-afb3-268ac343bbce","Type":"ContainerDied","Data":"5aa56bc2c06eb9479b9cdff31ffb50628bb171342b32c3a0e850c3a7a65c04b1"} Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.066435 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.155073 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4psc\" (UniqueName: \"kubernetes.io/projected/90809275-11d8-49b5-8a9b-2797b95657b8-kube-api-access-j4psc\") pod \"90809275-11d8-49b5-8a9b-2797b95657b8\" (UID: \"90809275-11d8-49b5-8a9b-2797b95657b8\") " Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.167587 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90809275-11d8-49b5-8a9b-2797b95657b8-kube-api-access-j4psc" (OuterVolumeSpecName: "kube-api-access-j4psc") pod "90809275-11d8-49b5-8a9b-2797b95657b8" (UID: "90809275-11d8-49b5-8a9b-2797b95657b8"). InnerVolumeSpecName "kube-api-access-j4psc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.241428 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-5g6kd"] Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.242558 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-5g6kd"
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.251320 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-5g6kd"]
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.256655 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4psc\" (UniqueName: \"kubernetes.io/projected/90809275-11d8-49b5-8a9b-2797b95657b8-kube-api-access-j4psc\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.357845 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps6hj\" (UniqueName: \"kubernetes.io/projected/5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc-kube-api-access-ps6hj\") pod \"watcher-db-create-5g6kd\" (UID: \"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc\") " pod="watcher-kuttl-default/watcher-db-create-5g6kd"
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.459581 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps6hj\" (UniqueName: \"kubernetes.io/projected/5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc-kube-api-access-ps6hj\") pod \"watcher-db-create-5g6kd\" (UID: \"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc\") " pod="watcher-kuttl-default/watcher-db-create-5g6kd"
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.481496 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps6hj\" (UniqueName: \"kubernetes.io/projected/5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc-kube-api-access-ps6hj\") pod \"watcher-db-create-5g6kd\" (UID: \"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc\") " pod="watcher-kuttl-default/watcher-db-create-5g6kd"
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.560020 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-5g6kd"
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.768792 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865126 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-combined-ca-bundle\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865182 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-sg-core-conf-yaml\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865219 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-scripts\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865250 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-run-httpd\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865311 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhrb8\" (UniqueName: \"kubernetes.io/projected/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-kube-api-access-bhrb8\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865333 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-ceilometer-tls-certs\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865393 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-config-data\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.865447 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-log-httpd\") pod \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\" (UID: \"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799\") "
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.866175 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.866689 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.885600 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-scripts" (OuterVolumeSpecName: "scripts") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.896988 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-kube-api-access-bhrb8" (OuterVolumeSpecName: "kube-api-access-bhrb8") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "kube-api-access-bhrb8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.903220 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.949142 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.952319 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.967549 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.967584 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.967595 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.967603 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.967611 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.967619 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhrb8\" (UniqueName: \"kubernetes.io/projected/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-kube-api-access-bhrb8\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.967628 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:09 crc kubenswrapper[4818]: I0930 17:28:09.998279 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-config-data" (OuterVolumeSpecName: "config-data") pod "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" (UID: "bbf0aa0b-7066-4b1e-9a82-3ba1c8698799"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.041748 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e4b8482-5f36-4a55-847e-c9a6c9ba65a5" path="/var/lib/kubelet/pods/6e4b8482-5f36-4a55-847e-c9a6c9ba65a5/volumes" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.042636 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b23764d-cc98-4adc-8770-583784a65fc8" path="/var/lib/kubelet/pods/7b23764d-cc98-4adc-8770-583784a65fc8/volumes" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.043209 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de9cb9c1-0637-4973-8294-8fad4c871099" path="/var/lib/kubelet/pods/de9cb9c1-0637-4973-8294-8fad4c871099/volumes" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.071013 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.085756 4818 generic.go:334] "Generic (PLEG): container finished" podID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerID="6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4" exitCode=0 Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.085828 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerDied","Data":"6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4"} Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.085851 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.085872 4818 scope.go:117] "RemoveContainer" containerID="57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.085857 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"bbf0aa0b-7066-4b1e-9a82-3ba1c8698799","Type":"ContainerDied","Data":"3e39e2fa66341c2652d198e0863d4b41ff1186627c9f7017458574b84113190d"} Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.088663 4818 generic.go:334] "Generic (PLEG): container finished" podID="c748315b-87b0-4452-afb3-268ac343bbce" containerID="75bec3ab40967464e3ec8ee9356f3f1769a0927d7e7dcf51b77530b18f62109f" exitCode=0 Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.088760 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchera500-account-delete-z9hn2" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.089111 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"c748315b-87b0-4452-afb3-268ac343bbce","Type":"ContainerDied","Data":"75bec3ab40967464e3ec8ee9356f3f1769a0927d7e7dcf51b77530b18f62109f"} Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.090364 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-5g6kd"] Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.116391 4818 scope.go:117] "RemoveContainer" containerID="9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.127507 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.180426 4818 scope.go:117] "RemoveContainer" containerID="6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.188890 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.219459 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.220183 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-central-agent" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220197 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-central-agent" Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.220230 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="proxy-httpd" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220237 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="proxy-httpd" Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.220249 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-notification-agent" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220257 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-notification-agent" Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.220313 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="sg-core" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220320 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="sg-core" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220569 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="sg-core" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220585 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-notification-agent" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220595 4818 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="proxy-httpd" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.220604 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" containerName="ceilometer-central-agent" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.235054 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.225096 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.239112 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.239347 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.240127 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.251048 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watchera500-account-delete-z9hn2"] Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.278581 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-run-httpd\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.278800 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8snz7\" (UniqueName: \"kubernetes.io/projected/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-kube-api-access-8snz7\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.278882 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-log-httpd\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.279009 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-scripts\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.279679 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.279773 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-config-data\") pod \"ceilometer-0\" 
(UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.279855 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.279961 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.283463 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watchera500-account-delete-z9hn2"] Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.287602 4818 scope.go:117] "RemoveContainer" containerID="672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.332528 4818 scope.go:117] "RemoveContainer" containerID="57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1" Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.332974 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1\": container with ID starting with 57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1 not found: ID does not exist" containerID="57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.333009 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1"} err="failed to get container status \"57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1\": rpc error: code = NotFound desc = could not find container \"57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1\": container with ID starting with 57fb9317639a10d0db171ad5a43d68c634b34c6c5011ddddfb7b1466501a3ac1 not found: ID does not exist" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.333029 4818 scope.go:117] "RemoveContainer" containerID="9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d" Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.333778 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d\": container with ID starting with 9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d not found: ID does not exist" containerID="9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.333806 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d"} err="failed to get container status \"9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d\": rpc error: code = NotFound desc = could not find container 
\"9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d\": container with ID starting with 9a338cd69f46cef6219ffa5e8c9b479024a72d97e7cd5809e3762143ab150d7d not found: ID does not exist" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.333820 4818 scope.go:117] "RemoveContainer" containerID="6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4" Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.340838 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4\": container with ID starting with 6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4 not found: ID does not exist" containerID="6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.340880 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4"} err="failed to get container status \"6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4\": rpc error: code = NotFound desc = could not find container \"6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4\": container with ID starting with 6e14aa00f7fab6ae289e8be0808d83940c109b4588dd735d37745813a22054c4 not found: ID does not exist" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.340903 4818 scope.go:117] "RemoveContainer" containerID="672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0" Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.341682 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0\": container with ID starting with 672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0 not found: ID does not exist" containerID="672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.341709 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0"} err="failed to get container status \"672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0\": rpc error: code = NotFound desc = could not find container \"672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0\": container with ID starting with 672ffa7c8fe910d41c3d41d43f4f359b47ed7a54939f685edcb0b9d0ee9762e0 not found: ID does not exist" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.380980 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381340 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-run-httpd\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381388 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8snz7\" (UniqueName: \"kubernetes.io/projected/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-kube-api-access-8snz7\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381430 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-log-httpd\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381487 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-scripts\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381525 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381563 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-config-data\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381583 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381607 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381660 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-run-httpd\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.381844 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-log-httpd\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.387371 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.387407 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.387737 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.388303 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-config-data\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.388887 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-scripts\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.402620 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8snz7\" (UniqueName: \"kubernetes.io/projected/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-kube-api-access-8snz7\") pod \"ceilometer-0\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.442257 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.444599 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.454790 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Sep 30 17:28:10 crc kubenswrapper[4818]: E0930 17:28:10.454857 4818 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="a703aca6-ed46-4141-8cfb-e5565daec9f4" containerName="watcher-applier"
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.483604 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c748315b-87b0-4452-afb3-268ac343bbce-logs\") pod \"c748315b-87b0-4452-afb3-268ac343bbce\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") "
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.483693 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-cert-memcached-mtls\") pod \"c748315b-87b0-4452-afb3-268ac343bbce\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") "
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.483750 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-custom-prometheus-ca\") pod \"c748315b-87b0-4452-afb3-268ac343bbce\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") "
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.484332 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-config-data\") pod \"c748315b-87b0-4452-afb3-268ac343bbce\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") "
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.484390 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-combined-ca-bundle\") pod \"c748315b-87b0-4452-afb3-268ac343bbce\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") "
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.484445 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw62m\" (UniqueName: \"kubernetes.io/projected/c748315b-87b0-4452-afb3-268ac343bbce-kube-api-access-gw62m\") pod \"c748315b-87b0-4452-afb3-268ac343bbce\" (UID: \"c748315b-87b0-4452-afb3-268ac343bbce\") "
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.485706 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c748315b-87b0-4452-afb3-268ac343bbce-logs" (OuterVolumeSpecName: "logs") pod "c748315b-87b0-4452-afb3-268ac343bbce" (UID: "c748315b-87b0-4452-afb3-268ac343bbce"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.487782 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c748315b-87b0-4452-afb3-268ac343bbce-kube-api-access-gw62m" (OuterVolumeSpecName: "kube-api-access-gw62m") pod "c748315b-87b0-4452-afb3-268ac343bbce" (UID: "c748315b-87b0-4452-afb3-268ac343bbce"). InnerVolumeSpecName "kube-api-access-gw62m". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.505866 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "c748315b-87b0-4452-afb3-268ac343bbce" (UID: "c748315b-87b0-4452-afb3-268ac343bbce"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.514180 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c748315b-87b0-4452-afb3-268ac343bbce" (UID: "c748315b-87b0-4452-afb3-268ac343bbce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.543229 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-config-data" (OuterVolumeSpecName: "config-data") pod "c748315b-87b0-4452-afb3-268ac343bbce" (UID: "c748315b-87b0-4452-afb3-268ac343bbce"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.575601 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "c748315b-87b0-4452-afb3-268ac343bbce" (UID: "c748315b-87b0-4452-afb3-268ac343bbce"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.586782 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c748315b-87b0-4452-afb3-268ac343bbce-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.586832 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.586845 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.586854 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.586862 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c748315b-87b0-4452-afb3-268ac343bbce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.586871 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw62m\" (UniqueName: \"kubernetes.io/projected/c748315b-87b0-4452-afb3-268ac343bbce-kube-api-access-gw62m\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:10 crc kubenswrapper[4818]: I0930 17:28:10.595442 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.065440 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:11 crc kubenswrapper[4818]: W0930 17:28:11.069081 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf91b6622_c8d2_498b_a43f_7c468ed9ffe3.slice/crio-4876f21af18382fdd44765672e3f17dc52457a0675d7c3a4e9c6573a5645b3e1 WatchSource:0}: Error finding container 4876f21af18382fdd44765672e3f17dc52457a0675d7c3a4e9c6573a5645b3e1: Status 404 returned error can't find the container with id 4876f21af18382fdd44765672e3f17dc52457a0675d7c3a4e9c6573a5645b3e1 Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.099032 4818 generic.go:334] "Generic (PLEG): container finished" podID="5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc" containerID="817bb379866e07743faebf871207d7a7068346de9d13c89d998266a97553bdf5" exitCode=0 Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.099122 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-5g6kd" event={"ID":"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc","Type":"ContainerDied","Data":"817bb379866e07743faebf871207d7a7068346de9d13c89d998266a97553bdf5"} Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.099152 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-5g6kd" event={"ID":"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc","Type":"ContainerStarted","Data":"9879fd9ca5f2172571b4eb176624593bb63e8d939cab623cd180c9d6eeec9f84"} Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.100841 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerStarted","Data":"4876f21af18382fdd44765672e3f17dc52457a0675d7c3a4e9c6573a5645b3e1"} Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.103007 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"c748315b-87b0-4452-afb3-268ac343bbce","Type":"ContainerDied","Data":"720c8ead865a78e9450baa76d90c78b04bb20ab7bf45edf8ba130c74f832a242"} Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.103045 4818 scope.go:117] "RemoveContainer" containerID="75bec3ab40967464e3ec8ee9356f3f1769a0927d7e7dcf51b77530b18f62109f" Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.103143 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.129952 4818 scope.go:117] "RemoveContainer" containerID="5aa56bc2c06eb9479b9cdff31ffb50628bb171342b32c3a0e850c3a7a65c04b1"
Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.148053 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.155518 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Sep 30 17:28:11 crc kubenswrapper[4818]: I0930 17:28:11.675294 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.029432 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90809275-11d8-49b5-8a9b-2797b95657b8" path="/var/lib/kubelet/pods/90809275-11d8-49b5-8a9b-2797b95657b8/volumes"
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.029842 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbf0aa0b-7066-4b1e-9a82-3ba1c8698799" path="/var/lib/kubelet/pods/bbf0aa0b-7066-4b1e-9a82-3ba1c8698799/volumes"
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.030548 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c748315b-87b0-4452-afb3-268ac343bbce" path="/var/lib/kubelet/pods/c748315b-87b0-4452-afb3-268ac343bbce/volumes"
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.117434 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerStarted","Data":"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"}
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.565469 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-5g6kd"
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.619305 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps6hj\" (UniqueName: \"kubernetes.io/projected/5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc-kube-api-access-ps6hj\") pod \"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc\" (UID: \"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc\") "
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.632605 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc-kube-api-access-ps6hj" (OuterVolumeSpecName: "kube-api-access-ps6hj") pod "5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc" (UID: "5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc"). InnerVolumeSpecName "kube-api-access-ps6hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.645124 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.726992 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a703aca6-ed46-4141-8cfb-e5565daec9f4-logs\") pod \"a703aca6-ed46-4141-8cfb-e5565daec9f4\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") "
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.727053 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-config-data\") pod \"a703aca6-ed46-4141-8cfb-e5565daec9f4\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") "
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.727091 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-cert-memcached-mtls\") pod \"a703aca6-ed46-4141-8cfb-e5565daec9f4\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") "
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.727186 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7cc2\" (UniqueName: \"kubernetes.io/projected/a703aca6-ed46-4141-8cfb-e5565daec9f4-kube-api-access-f7cc2\") pod \"a703aca6-ed46-4141-8cfb-e5565daec9f4\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") "
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.727212 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-combined-ca-bundle\") pod \"a703aca6-ed46-4141-8cfb-e5565daec9f4\" (UID: \"a703aca6-ed46-4141-8cfb-e5565daec9f4\") "
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.727329 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a703aca6-ed46-4141-8cfb-e5565daec9f4-logs" (OuterVolumeSpecName: "logs") pod "a703aca6-ed46-4141-8cfb-e5565daec9f4" (UID: "a703aca6-ed46-4141-8cfb-e5565daec9f4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.727547 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps6hj\" (UniqueName: \"kubernetes.io/projected/5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc-kube-api-access-ps6hj\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.727569 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a703aca6-ed46-4141-8cfb-e5565daec9f4-logs\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.734081 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a703aca6-ed46-4141-8cfb-e5565daec9f4-kube-api-access-f7cc2" (OuterVolumeSpecName: "kube-api-access-f7cc2") pod "a703aca6-ed46-4141-8cfb-e5565daec9f4" (UID: "a703aca6-ed46-4141-8cfb-e5565daec9f4"). InnerVolumeSpecName "kube-api-access-f7cc2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.755821 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a703aca6-ed46-4141-8cfb-e5565daec9f4" (UID: "a703aca6-ed46-4141-8cfb-e5565daec9f4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.769442 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-config-data" (OuterVolumeSpecName: "config-data") pod "a703aca6-ed46-4141-8cfb-e5565daec9f4" (UID: "a703aca6-ed46-4141-8cfb-e5565daec9f4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.800162 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "a703aca6-ed46-4141-8cfb-e5565daec9f4" (UID: "a703aca6-ed46-4141-8cfb-e5565daec9f4"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.829308 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.829340 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.829350 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7cc2\" (UniqueName: \"kubernetes.io/projected/a703aca6-ed46-4141-8cfb-e5565daec9f4-kube-api-access-f7cc2\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:12 crc kubenswrapper[4818]: I0930 17:28:12.829359 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703aca6-ed46-4141-8cfb-e5565daec9f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.137540 4818 generic.go:334] "Generic (PLEG): container finished" podID="a703aca6-ed46-4141-8cfb-e5565daec9f4" containerID="30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92" exitCode=0
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.137603 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.137630 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a703aca6-ed46-4141-8cfb-e5565daec9f4","Type":"ContainerDied","Data":"30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92"}
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.137658 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a703aca6-ed46-4141-8cfb-e5565daec9f4","Type":"ContainerDied","Data":"04cba75518053006eb92be2bbda1e8e9b27fe3629c9f4331fe2527398c8ae1c5"}
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.137674 4818 scope.go:117] "RemoveContainer" containerID="30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92"
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.141229 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-5g6kd" event={"ID":"5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc","Type":"ContainerDied","Data":"9879fd9ca5f2172571b4eb176624593bb63e8d939cab623cd180c9d6eeec9f84"}
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.141279 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9879fd9ca5f2172571b4eb176624593bb63e8d939cab623cd180c9d6eeec9f84"
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.141342 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-5g6kd"
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.150396 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerStarted","Data":"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29"}
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.165447 4818 scope.go:117] "RemoveContainer" containerID="30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92"
Sep 30 17:28:13 crc kubenswrapper[4818]: E0930 17:28:13.166056 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92\": container with ID starting with 30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92 not found: ID does not exist" containerID="30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92"
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.166108 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92"} err="failed to get container status \"30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92\": rpc error: code = NotFound desc = could not find container \"30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92\": container with ID starting with 30f8724d1ea0ea24d1175f50d66adbdf2f3be4c36999305a3140e188f7e1db92 not found: ID does not exist"
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.187122 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:28:13 crc kubenswrapper[4818]: I0930 17:28:13.193846 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:28:14 crc kubenswrapper[4818]: I0930 17:28:14.028124 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233"
Sep 30 17:28:14 crc kubenswrapper[4818]: E0930 17:28:14.028585 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b"
Sep 30 17:28:14 crc kubenswrapper[4818]: I0930 17:28:14.031138 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a703aca6-ed46-4141-8cfb-e5565daec9f4" path="/var/lib/kubelet/pods/a703aca6-ed46-4141-8cfb-e5565daec9f4/volumes"
Sep 30 17:28:14 crc kubenswrapper[4818]: I0930 17:28:14.159366 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerStarted","Data":"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4"}
Sep 30 17:28:15 crc kubenswrapper[4818]: I0930 17:28:15.176680 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerStarted","Data":"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8"}
Sep 30 17:28:15 crc kubenswrapper[4818]: I0930 17:28:15.177118 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:15 crc kubenswrapper[4818]: I0930 17:28:15.176902 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-central-agent" containerID="cri-o://0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4" gracePeriod=30
Sep 30 17:28:15 crc kubenswrapper[4818]: I0930 17:28:15.177238 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="sg-core" containerID="cri-o://736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4" gracePeriod=30
Sep 30 17:28:15 crc kubenswrapper[4818]: I0930 17:28:15.177274 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-notification-agent" containerID="cri-o://c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29" gracePeriod=30
Sep 30 17:28:15 crc kubenswrapper[4818]: I0930 17:28:15.177414 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="proxy-httpd" containerID="cri-o://10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8" gracePeriod=30
Sep 30 17:28:15 crc kubenswrapper[4818]: I0930 17:28:15.230281 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.005328792 podStartE2EDuration="5.230248004s" podCreationTimestamp="2025-09-30 17:28:10 +0000 UTC" firstStartedPulling="2025-09-30 17:28:11.07336311 +0000 UTC m=+1737.827634926" lastFinishedPulling="2025-09-30 17:28:14.298282322 +0000 UTC m=+1741.052554138" observedRunningTime="2025-09-30 17:28:15.219585336 +0000 UTC m=+1741.973857192" watchObservedRunningTime="2025-09-30 17:28:15.230248004 +0000 UTC m=+1741.984519860"
Sep 30 17:28:15 crc kubenswrapper[4818]: E0930 17:28:15.429599 4818 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf91b6622_c8d2_498b_a43f_7c468ed9ffe3.slice/crio-conmon-736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf91b6622_c8d2_498b_a43f_7c468ed9ffe3.slice/crio-10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf91b6622_c8d2_498b_a43f_7c468ed9ffe3.slice/crio-conmon-10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8.scope\": RecentStats: unable to find data in memory cache]"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.082506 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186727 4818 generic.go:334] "Generic (PLEG): container finished" podID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerID="10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8" exitCode=0
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186756 4818 generic.go:334] "Generic (PLEG): container finished" podID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerID="736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4" exitCode=2
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186764 4818 generic.go:334] "Generic (PLEG): container finished" podID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerID="c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29" exitCode=0
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186773 4818 generic.go:334] "Generic (PLEG): container finished" podID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerID="0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4" exitCode=0
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186792 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerDied","Data":"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8"}
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186818 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerDied","Data":"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4"}
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186831 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerDied","Data":"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29"}
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186841 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerDied","Data":"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"}
event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerDied","Data":"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"} Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186850 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f91b6622-c8d2-498b-a43f-7c468ed9ffe3","Type":"ContainerDied","Data":"4876f21af18382fdd44765672e3f17dc52457a0675d7c3a4e9c6573a5645b3e1"} Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.186864 4818 scope.go:117] "RemoveContainer" containerID="10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.187005 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190415 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8snz7\" (UniqueName: \"kubernetes.io/projected/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-kube-api-access-8snz7\") pod \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190468 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-run-httpd\") pod \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190539 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-log-httpd\") pod \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190560 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-config-data\") pod \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190608 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-scripts\") pod \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190627 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-sg-core-conf-yaml\") pod \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190651 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-combined-ca-bundle\") pod \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.190683 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-ceilometer-tls-certs\") pod 
\"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\" (UID: \"f91b6622-c8d2-498b-a43f-7c468ed9ffe3\") " Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.192722 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.193104 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.195830 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-kube-api-access-8snz7" (OuterVolumeSpecName: "kube-api-access-8snz7") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "kube-api-access-8snz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.197060 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-scripts" (OuterVolumeSpecName: "scripts") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.208024 4818 scope.go:117] "RemoveContainer" containerID="736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.232299 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.233168 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.263145 4818 scope.go:117] "RemoveContainer" containerID="c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.263584 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.283230 4818 scope.go:117] "RemoveContainer" containerID="0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.292342 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8snz7\" (UniqueName: \"kubernetes.io/projected/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-kube-api-access-8snz7\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.292382 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.292395 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.292406 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.292417 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.292428 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.292439 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.298960 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-config-data" (OuterVolumeSpecName: "config-data") pod "f91b6622-c8d2-498b-a43f-7c468ed9ffe3" (UID: "f91b6622-c8d2-498b-a43f-7c468ed9ffe3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.300870 4818 scope.go:117] "RemoveContainer" containerID="10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8" Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.301441 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": container with ID starting with 10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8 not found: ID does not exist" containerID="10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.301488 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8"} err="failed to get container status \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": rpc error: code = NotFound desc = could not find container \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": container with ID starting with 10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.301512 4818 scope.go:117] "RemoveContainer" containerID="736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4" Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.301829 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": container with ID starting with 736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4 not found: ID does not exist" containerID="736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.301883 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4"} err="failed to get container status \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": rpc error: code = NotFound desc = could not find container \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": container with ID starting with 736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.301915 4818 scope.go:117] "RemoveContainer" containerID="c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29" Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.302306 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": container with ID starting with c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29 not found: ID does not exist" containerID="c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.302334 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29"} err="failed to get container status \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": rpc error: code = NotFound desc = could not 
find container \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": container with ID starting with c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.302350 4818 scope.go:117] "RemoveContainer" containerID="0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4" Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.302599 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": container with ID starting with 0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4 not found: ID does not exist" containerID="0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.302622 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"} err="failed to get container status \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": rpc error: code = NotFound desc = could not find container \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": container with ID starting with 0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.302636 4818 scope.go:117] "RemoveContainer" containerID="10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.302853 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8"} err="failed to get container status \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": rpc error: code = NotFound desc = could not find container \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": container with ID starting with 10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.302872 4818 scope.go:117] "RemoveContainer" containerID="736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303081 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4"} err="failed to get container status \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": rpc error: code = NotFound desc = could not find container \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": container with ID starting with 736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303101 4818 scope.go:117] "RemoveContainer" containerID="c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303287 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29"} err="failed to get container status \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": rpc error: code = NotFound desc = could not 
find container \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": container with ID starting with c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303304 4818 scope.go:117] "RemoveContainer" containerID="0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303485 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"} err="failed to get container status \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": rpc error: code = NotFound desc = could not find container \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": container with ID starting with 0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303508 4818 scope.go:117] "RemoveContainer" containerID="10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303803 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8"} err="failed to get container status \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": rpc error: code = NotFound desc = could not find container \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": container with ID starting with 10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.303825 4818 scope.go:117] "RemoveContainer" containerID="736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304024 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4"} err="failed to get container status \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": rpc error: code = NotFound desc = could not find container \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": container with ID starting with 736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304043 4818 scope.go:117] "RemoveContainer" containerID="c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304450 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29"} err="failed to get container status \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": rpc error: code = NotFound desc = could not find container \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": container with ID starting with c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29 not found: ID does not exist" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304471 4818 scope.go:117] "RemoveContainer" containerID="0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304664 4818 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"} err="failed to get container status \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": rpc error: code = NotFound desc = could not find container \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": container with ID starting with 0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4 not found: ID does not exist"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304683 4818 scope.go:117] "RemoveContainer" containerID="10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304862 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8"} err="failed to get container status \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": rpc error: code = NotFound desc = could not find container \"10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8\": container with ID starting with 10c1b0dd07356520833542d06904950077cd936ee5c6761a359f5ae8e6b5f0b8 not found: ID does not exist"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.304880 4818 scope.go:117] "RemoveContainer" containerID="736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.305075 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4"} err="failed to get container status \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": rpc error: code = NotFound desc = could not find container \"736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4\": container with ID starting with 736f147fad8b55bd365c9ad08dd105a53679d9dafaa3b365ad965bb60ce9bbe4 not found: ID does not exist"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.305094 4818 scope.go:117] "RemoveContainer" containerID="c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.305278 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29"} err="failed to get container status \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": rpc error: code = NotFound desc = could not find container \"c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29\": container with ID starting with c01b7c57034dfa77bab0e233397e7781994294ddcbc5ab2f4359b80067b31a29 not found: ID does not exist"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.305296 4818 scope.go:117] "RemoveContainer" containerID="0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.305543 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4"} err="failed to get container status \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": rpc error: code = NotFound desc = could not find container \"0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4\": container with ID starting with 0ec7a8aad30f1d447db785d1cc1c248a2ade9e2238362738726b229344c90fd4 not found: ID does not exist"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.394310 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91b6622-c8d2-498b-a43f-7c468ed9ffe3-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.551589 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.560332 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.569951 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570361 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a703aca6-ed46-4141-8cfb-e5565daec9f4" containerName="watcher-applier"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570379 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="a703aca6-ed46-4141-8cfb-e5565daec9f4" containerName="watcher-applier"
Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570399 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-api"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570406 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-api"
Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570420 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-notification-agent"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570429 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-notification-agent"
Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570442 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc" containerName="mariadb-database-create"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570450 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc" containerName="mariadb-database-create"
Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570464 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="proxy-httpd"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570472 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="proxy-httpd"
Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570492 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="sg-core"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570498 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="sg-core"
Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570519 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-kuttl-api-log"
Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570528 4818 state_mem.go:107] "Deleted CPUSet assignment"
podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-kuttl-api-log" Sep 30 17:28:16 crc kubenswrapper[4818]: E0930 17:28:16.570542 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-central-agent" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570549 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-central-agent" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570731 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-api" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570748 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c748315b-87b0-4452-afb3-268ac343bbce" containerName="watcher-kuttl-api-log" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570761 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="sg-core" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570771 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="proxy-httpd" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570794 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc" containerName="mariadb-database-create" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570804 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="a703aca6-ed46-4141-8cfb-e5565daec9f4" containerName="watcher-applier" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570814 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-notification-agent" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.570827 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" containerName="ceilometer-central-agent" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.572562 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.577394 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.577554 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.577638 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.598450 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.698958 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.699018 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-log-httpd\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.699068 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-run-httpd\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.699093 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.699112 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.699173 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-scripts\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.699190 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62v22\" (UniqueName: \"kubernetes.io/projected/00605988-b664-47d9-8a19-104673424799-kube-api-access-62v22\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.699251 4818 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-config-data\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800683 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-run-httpd\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800729 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800749 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800777 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-scripts\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800797 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62v22\" (UniqueName: \"kubernetes.io/projected/00605988-b664-47d9-8a19-104673424799-kube-api-access-62v22\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800847 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-config-data\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800879 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.800905 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-log-httpd\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.801360 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-log-httpd\") pod \"ceilometer-0\" (UID: 
\"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.801465 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-run-httpd\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.805755 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.806301 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-scripts\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.806310 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-config-data\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.806452 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.816578 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.825425 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62v22\" (UniqueName: \"kubernetes.io/projected/00605988-b664-47d9-8a19-104673424799-kube-api-access-62v22\") pod \"ceilometer-0\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:16 crc kubenswrapper[4818]: I0930 17:28:16.888417 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:17 crc kubenswrapper[4818]: W0930 17:28:17.414565 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00605988_b664_47d9_8a19_104673424799.slice/crio-702ad881d57f037930dafa4880ad22aa9a2a250a5f723a45e7929d6a6dd3a83d WatchSource:0}: Error finding container 702ad881d57f037930dafa4880ad22aa9a2a250a5f723a45e7929d6a6dd3a83d: Status 404 returned error can't find the container with id 702ad881d57f037930dafa4880ad22aa9a2a250a5f723a45e7929d6a6dd3a83d Sep 30 17:28:17 crc kubenswrapper[4818]: I0930 17:28:17.418443 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:18 crc kubenswrapper[4818]: I0930 17:28:18.032635 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f91b6622-c8d2-498b-a43f-7c468ed9ffe3" path="/var/lib/kubelet/pods/f91b6622-c8d2-498b-a43f-7c468ed9ffe3/volumes" Sep 30 17:28:18 crc kubenswrapper[4818]: I0930 17:28:18.206260 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerStarted","Data":"702ad881d57f037930dafa4880ad22aa9a2a250a5f723a45e7929d6a6dd3a83d"} Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.007311 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.144452 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-combined-ca-bundle\") pod \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.144856 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfmc5\" (UniqueName: \"kubernetes.io/projected/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-kube-api-access-jfmc5\") pod \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.144901 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-config-data\") pod \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.144938 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-custom-prometheus-ca\") pod \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.145023 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-logs\") pod \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.145040 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: 
\"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-cert-memcached-mtls\") pod \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\" (UID: \"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23\") " Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.147252 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-logs" (OuterVolumeSpecName: "logs") pod "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" (UID: "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.152559 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-kube-api-access-jfmc5" (OuterVolumeSpecName: "kube-api-access-jfmc5") pod "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" (UID: "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23"). InnerVolumeSpecName "kube-api-access-jfmc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.171777 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" (UID: "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.177894 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" (UID: "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.228138 4818 generic.go:334] "Generic (PLEG): container finished" podID="c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" containerID="bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b" exitCode=0 Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.228221 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.228244 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23","Type":"ContainerDied","Data":"bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b"} Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.228311 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"c276f56d-cf96-4dd0-b43b-ea9e72d2ec23","Type":"ContainerDied","Data":"cab820a90ee7eaba644e027b1ba3a9aeb7574a1c57be7339d0b12bcb0c732b08"} Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.228330 4818 scope.go:117] "RemoveContainer" containerID="bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.233683 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-config-data" (OuterVolumeSpecName: "config-data") pod "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" (UID: "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.235840 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerStarted","Data":"851326e2127c6cd1652dc683d3cea8187519285118dacaceb44508f0f95d046f"} Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.239747 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-5nf96"] Sep 30 17:28:19 crc kubenswrapper[4818]: E0930 17:28:19.240138 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" containerName="watcher-decision-engine" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.240157 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" containerName="watcher-decision-engine" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.240315 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" containerName="watcher-decision-engine" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.240835 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-5nf96" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.243168 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.248551 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.248584 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.248594 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.248603 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.248613 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfmc5\" (UniqueName: \"kubernetes.io/projected/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-kube-api-access-jfmc5\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.255518 4818 scope.go:117] "RemoveContainer" containerID="bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b" Sep 30 17:28:19 crc kubenswrapper[4818]: E0930 17:28:19.256065 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b\": container with ID starting with bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b not found: ID does not exist" containerID="bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.256203 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b"} err="failed to get container status \"bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b\": rpc error: code = NotFound desc = could not find container \"bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b\": container with ID starting with bb8598e82102294a9eaed0ff410d368bbdb2fe417cc0a8ec7a53df335015949b not found: ID does not exist" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.261472 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" (UID: "c276f56d-cf96-4dd0-b43b-ea9e72d2ec23"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.265899 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-5nf96"] Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.350253 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdngc\" (UniqueName: \"kubernetes.io/projected/29b3cda2-f43c-40cb-b6b0-8f849847cafa-kube-api-access-jdngc\") pod \"watcher-test-account-create-5nf96\" (UID: \"29b3cda2-f43c-40cb-b6b0-8f849847cafa\") " pod="watcher-kuttl-default/watcher-test-account-create-5nf96" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.350671 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.452354 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdngc\" (UniqueName: \"kubernetes.io/projected/29b3cda2-f43c-40cb-b6b0-8f849847cafa-kube-api-access-jdngc\") pod \"watcher-test-account-create-5nf96\" (UID: \"29b3cda2-f43c-40cb-b6b0-8f849847cafa\") " pod="watcher-kuttl-default/watcher-test-account-create-5nf96" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.478972 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdngc\" (UniqueName: \"kubernetes.io/projected/29b3cda2-f43c-40cb-b6b0-8f849847cafa-kube-api-access-jdngc\") pod \"watcher-test-account-create-5nf96\" (UID: \"29b3cda2-f43c-40cb-b6b0-8f849847cafa\") " pod="watcher-kuttl-default/watcher-test-account-create-5nf96" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.565582 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-5nf96" Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.569613 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:28:19 crc kubenswrapper[4818]: I0930 17:28:19.578011 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:28:20 crc kubenswrapper[4818]: I0930 17:28:20.032446 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c276f56d-cf96-4dd0-b43b-ea9e72d2ec23" path="/var/lib/kubelet/pods/c276f56d-cf96-4dd0-b43b-ea9e72d2ec23/volumes" Sep 30 17:28:20 crc kubenswrapper[4818]: I0930 17:28:20.066260 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-5nf96"] Sep 30 17:28:20 crc kubenswrapper[4818]: I0930 17:28:20.245050 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerStarted","Data":"28bc0dbd89daf2ecc9b30c55f7f99b75b7de79877f2eeeaa7d30f356b0f8fe79"} Sep 30 17:28:20 crc kubenswrapper[4818]: I0930 17:28:20.245085 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerStarted","Data":"26bc4509acca485d4a9b51d750068cf9d79b49e59f54669c47145a55ffeee2c2"} Sep 30 17:28:20 crc kubenswrapper[4818]: I0930 17:28:20.248671 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-test-account-create-5nf96" event={"ID":"29b3cda2-f43c-40cb-b6b0-8f849847cafa","Type":"ContainerStarted","Data":"37f0039064a51e57df1ed0d22d31ee9c11e91aae8188a5a41cf2023f1a18db32"} Sep 30 17:28:21 crc kubenswrapper[4818]: I0930 17:28:21.274393 4818 generic.go:334] "Generic (PLEG): container finished" podID="29b3cda2-f43c-40cb-b6b0-8f849847cafa" containerID="1d035176cf8c9777ef092070627b3a9109e143d9bed407bcb20f16d1dec29bc3" exitCode=0 Sep 30 17:28:21 crc kubenswrapper[4818]: I0930 17:28:21.274621 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-test-account-create-5nf96" event={"ID":"29b3cda2-f43c-40cb-b6b0-8f849847cafa","Type":"ContainerDied","Data":"1d035176cf8c9777ef092070627b3a9109e143d9bed407bcb20f16d1dec29bc3"} Sep 30 17:28:22 crc kubenswrapper[4818]: I0930 17:28:22.286761 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerStarted","Data":"b338df486816f7c95569235ff11bbc7cef94d34678edf2f55265ddc32d7395a3"} Sep 30 17:28:22 crc kubenswrapper[4818]: I0930 17:28:22.287145 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:22 crc kubenswrapper[4818]: I0930 17:28:22.312569 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.44160444 podStartE2EDuration="6.312548679s" podCreationTimestamp="2025-09-30 17:28:16 +0000 UTC" firstStartedPulling="2025-09-30 17:28:17.418036921 +0000 UTC m=+1744.172308737" lastFinishedPulling="2025-09-30 17:28:21.28898112 +0000 UTC m=+1748.043252976" observedRunningTime="2025-09-30 17:28:22.308216962 +0000 UTC m=+1749.062488778" watchObservedRunningTime="2025-09-30 17:28:22.312548679 +0000 UTC m=+1749.066820495" Sep 30 17:28:22 crc 
kubenswrapper[4818]: I0930 17:28:22.664077 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-5nf96" Sep 30 17:28:22 crc kubenswrapper[4818]: I0930 17:28:22.824564 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdngc\" (UniqueName: \"kubernetes.io/projected/29b3cda2-f43c-40cb-b6b0-8f849847cafa-kube-api-access-jdngc\") pod \"29b3cda2-f43c-40cb-b6b0-8f849847cafa\" (UID: \"29b3cda2-f43c-40cb-b6b0-8f849847cafa\") " Sep 30 17:28:22 crc kubenswrapper[4818]: I0930 17:28:22.843199 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29b3cda2-f43c-40cb-b6b0-8f849847cafa-kube-api-access-jdngc" (OuterVolumeSpecName: "kube-api-access-jdngc") pod "29b3cda2-f43c-40cb-b6b0-8f849847cafa" (UID: "29b3cda2-f43c-40cb-b6b0-8f849847cafa"). InnerVolumeSpecName "kube-api-access-jdngc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:28:22 crc kubenswrapper[4818]: I0930 17:28:22.927708 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdngc\" (UniqueName: \"kubernetes.io/projected/29b3cda2-f43c-40cb-b6b0-8f849847cafa-kube-api-access-jdngc\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:23 crc kubenswrapper[4818]: I0930 17:28:23.051307 4818 scope.go:117] "RemoveContainer" containerID="b1c8f422c861eb90f8e2fbaa82d47d565c47b417456fbc1f1853d78d50f2ffe8" Sep 30 17:28:23 crc kubenswrapper[4818]: I0930 17:28:23.297014 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-5nf96" Sep 30 17:28:23 crc kubenswrapper[4818]: I0930 17:28:23.308262 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-test-account-create-5nf96" event={"ID":"29b3cda2-f43c-40cb-b6b0-8f849847cafa","Type":"ContainerDied","Data":"37f0039064a51e57df1ed0d22d31ee9c11e91aae8188a5a41cf2023f1a18db32"} Sep 30 17:28:23 crc kubenswrapper[4818]: I0930 17:28:23.308293 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37f0039064a51e57df1ed0d22d31ee9c11e91aae8188a5a41cf2023f1a18db32" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.398300 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx"] Sep 30 17:28:24 crc kubenswrapper[4818]: E0930 17:28:24.398678 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29b3cda2-f43c-40cb-b6b0-8f849847cafa" containerName="mariadb-account-create" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.398692 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="29b3cda2-f43c-40cb-b6b0-8f849847cafa" containerName="mariadb-account-create" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.398901 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="29b3cda2-f43c-40cb-b6b0-8f849847cafa" containerName="mariadb-account-create" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.399590 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.401524 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.402743 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-kzksz" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.408535 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx"] Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.558729 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.559066 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-config-data\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.559095 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-db-sync-config-data\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.559118 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75mbd\" (UniqueName: \"kubernetes.io/projected/153cf1c8-ebf2-4dee-a348-509aa5037630-kube-api-access-75mbd\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.660732 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.660886 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-config-data\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.660991 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-db-sync-config-data\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc 
kubenswrapper[4818]: I0930 17:28:24.661057 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75mbd\" (UniqueName: \"kubernetes.io/projected/153cf1c8-ebf2-4dee-a348-509aa5037630-kube-api-access-75mbd\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.666549 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-db-sync-config-data\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.667334 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-config-data\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.668465 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.681254 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75mbd\" (UniqueName: \"kubernetes.io/projected/153cf1c8-ebf2-4dee-a348-509aa5037630-kube-api-access-75mbd\") pod \"watcher-kuttl-db-sync-lh9lx\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:24 crc kubenswrapper[4818]: I0930 17:28:24.726856 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:25 crc kubenswrapper[4818]: I0930 17:28:25.020585 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:28:25 crc kubenswrapper[4818]: E0930 17:28:25.021050 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:28:25 crc kubenswrapper[4818]: I0930 17:28:25.189136 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx"] Sep 30 17:28:25 crc kubenswrapper[4818]: W0930 17:28:25.204761 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod153cf1c8_ebf2_4dee_a348_509aa5037630.slice/crio-bc8255f816aa35f58d3b471563a8db140d7067554915f7dfc8601bfbeeeb3f17 WatchSource:0}: Error finding container bc8255f816aa35f58d3b471563a8db140d7067554915f7dfc8601bfbeeeb3f17: Status 404 returned error can't find the container with id bc8255f816aa35f58d3b471563a8db140d7067554915f7dfc8601bfbeeeb3f17 Sep 30 17:28:25 crc kubenswrapper[4818]: I0930 17:28:25.318545 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" event={"ID":"153cf1c8-ebf2-4dee-a348-509aa5037630","Type":"ContainerStarted","Data":"bc8255f816aa35f58d3b471563a8db140d7067554915f7dfc8601bfbeeeb3f17"} Sep 30 17:28:26 crc kubenswrapper[4818]: I0930 17:28:26.334193 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" event={"ID":"153cf1c8-ebf2-4dee-a348-509aa5037630","Type":"ContainerStarted","Data":"be5102971e9c13036fbca25c7228887b3ec7872cdb3a02253f1fa5e52ddb4d23"} Sep 30 17:28:26 crc kubenswrapper[4818]: I0930 17:28:26.364169 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" podStartSLOduration=2.364149576 podStartE2EDuration="2.364149576s" podCreationTimestamp="2025-09-30 17:28:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:28:26.358579815 +0000 UTC m=+1753.112851641" watchObservedRunningTime="2025-09-30 17:28:26.364149576 +0000 UTC m=+1753.118421382" Sep 30 17:28:28 crc kubenswrapper[4818]: I0930 17:28:28.351106 4818 generic.go:334] "Generic (PLEG): container finished" podID="153cf1c8-ebf2-4dee-a348-509aa5037630" containerID="be5102971e9c13036fbca25c7228887b3ec7872cdb3a02253f1fa5e52ddb4d23" exitCode=0 Sep 30 17:28:28 crc kubenswrapper[4818]: I0930 17:28:28.351142 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" event={"ID":"153cf1c8-ebf2-4dee-a348-509aa5037630","Type":"ContainerDied","Data":"be5102971e9c13036fbca25c7228887b3ec7872cdb3a02253f1fa5e52ddb4d23"} Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.769740 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.840005 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-config-data\") pod \"153cf1c8-ebf2-4dee-a348-509aa5037630\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.840068 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-db-sync-config-data\") pod \"153cf1c8-ebf2-4dee-a348-509aa5037630\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.840124 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75mbd\" (UniqueName: \"kubernetes.io/projected/153cf1c8-ebf2-4dee-a348-509aa5037630-kube-api-access-75mbd\") pod \"153cf1c8-ebf2-4dee-a348-509aa5037630\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.840180 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-combined-ca-bundle\") pod \"153cf1c8-ebf2-4dee-a348-509aa5037630\" (UID: \"153cf1c8-ebf2-4dee-a348-509aa5037630\") " Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.846167 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/153cf1c8-ebf2-4dee-a348-509aa5037630-kube-api-access-75mbd" (OuterVolumeSpecName: "kube-api-access-75mbd") pod "153cf1c8-ebf2-4dee-a348-509aa5037630" (UID: "153cf1c8-ebf2-4dee-a348-509aa5037630"). InnerVolumeSpecName "kube-api-access-75mbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.851108 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "153cf1c8-ebf2-4dee-a348-509aa5037630" (UID: "153cf1c8-ebf2-4dee-a348-509aa5037630"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.866757 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "153cf1c8-ebf2-4dee-a348-509aa5037630" (UID: "153cf1c8-ebf2-4dee-a348-509aa5037630"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.895146 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-config-data" (OuterVolumeSpecName: "config-data") pod "153cf1c8-ebf2-4dee-a348-509aa5037630" (UID: "153cf1c8-ebf2-4dee-a348-509aa5037630"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.942878 4818 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.942912 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75mbd\" (UniqueName: \"kubernetes.io/projected/153cf1c8-ebf2-4dee-a348-509aa5037630-kube-api-access-75mbd\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.942954 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:29 crc kubenswrapper[4818]: I0930 17:28:29.942964 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153cf1c8-ebf2-4dee-a348-509aa5037630-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.369141 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" event={"ID":"153cf1c8-ebf2-4dee-a348-509aa5037630","Type":"ContainerDied","Data":"bc8255f816aa35f58d3b471563a8db140d7067554915f7dfc8601bfbeeeb3f17"} Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.369484 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc8255f816aa35f58d3b471563a8db140d7067554915f7dfc8601bfbeeeb3f17" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.369180 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.570936 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:28:30 crc kubenswrapper[4818]: E0930 17:28:30.571370 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153cf1c8-ebf2-4dee-a348-509aa5037630" containerName="watcher-kuttl-db-sync" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.571390 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="153cf1c8-ebf2-4dee-a348-509aa5037630" containerName="watcher-kuttl-db-sync" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.571606 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="153cf1c8-ebf2-4dee-a348-509aa5037630" containerName="watcher-kuttl-db-sync" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.572698 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.575620 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.575698 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-kzksz" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.580267 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.591433 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.593148 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.606127 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.607144 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.611431 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.637572 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.646494 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654198 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-logs\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654268 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654305 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c95wx\" (UniqueName: \"kubernetes.io/projected/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-kube-api-access-c95wx\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654367 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654445 4818 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654497 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654525 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654592 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654634 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654681 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-logs\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654942 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.654969 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db42r\" (UniqueName: \"kubernetes.io/projected/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-kube-api-access-db42r\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.686115 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.687108 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.692381 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.698609 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759731 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759775 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759803 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759824 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759842 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759862 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759883 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff93e567-401d-4642-8878-da000741d567-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759905 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-config-data\") pod 
\"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759938 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759961 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759976 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh7vj\" (UniqueName: \"kubernetes.io/projected/1b549c07-8dab-4c32-a027-45e710c74e95-kube-api-access-nh7vj\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.759994 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760013 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-logs\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760038 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760057 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db42r\" (UniqueName: \"kubernetes.io/projected/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-kube-api-access-db42r\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760080 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760107 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-logs\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760122 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760142 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c95wx\" (UniqueName: \"kubernetes.io/projected/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-kube-api-access-c95wx\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760158 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760175 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b549c07-8dab-4c32-a027-45e710c74e95-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760197 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjmch\" (UniqueName: \"kubernetes.io/projected/ff93e567-401d-4642-8878-da000741d567-kube-api-access-tjmch\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.760217 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.767298 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-logs\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.767398 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.770198 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-logs\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.770831 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.774615 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.775527 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.778465 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.779206 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.786438 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.786606 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.791377 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c95wx\" (UniqueName: \"kubernetes.io/projected/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-kube-api-access-c95wx\") pod \"watcher-kuttl-api-1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.799510 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db42r\" (UniqueName: \"kubernetes.io/projected/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-kube-api-access-db42r\") pod \"watcher-kuttl-api-0\" (UID: 
\"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861507 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861569 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff93e567-401d-4642-8878-da000741d567-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861603 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861639 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh7vj\" (UniqueName: \"kubernetes.io/projected/1b549c07-8dab-4c32-a027-45e710c74e95-kube-api-access-nh7vj\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861664 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861716 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861789 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861814 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b549c07-8dab-4c32-a027-45e710c74e95-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861848 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjmch\" (UniqueName: \"kubernetes.io/projected/ff93e567-401d-4642-8878-da000741d567-kube-api-access-tjmch\") pod 
\"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861898 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.861945 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.862667 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b549c07-8dab-4c32-a027-45e710c74e95-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.862978 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff93e567-401d-4642-8878-da000741d567-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.866826 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.867442 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.871612 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.872205 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.872753 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-config-data\") pod \"watcher-kuttl-applier-0\" (UID: 
\"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.876399 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.884427 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh7vj\" (UniqueName: \"kubernetes.io/projected/1b549c07-8dab-4c32-a027-45e710c74e95-kube-api-access-nh7vj\") pod \"watcher-kuttl-applier-0\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.884728 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.885763 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjmch\" (UniqueName: \"kubernetes.io/projected/ff93e567-401d-4642-8878-da000741d567-kube-api-access-tjmch\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.894416 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.924947 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:30 crc kubenswrapper[4818]: I0930 17:28:30.943559 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:31 crc kubenswrapper[4818]: I0930 17:28:31.001995 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:31 crc kubenswrapper[4818]: I0930 17:28:31.432452 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:28:31 crc kubenswrapper[4818]: I0930 17:28:31.546066 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:28:31 crc kubenswrapper[4818]: I0930 17:28:31.561462 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:28:31 crc kubenswrapper[4818]: W0930 17:28:31.563168 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b549c07_8dab_4c32_a027_45e710c74e95.slice/crio-627b070f17a7e31bd0b2e6ca715ffb16ea1c16faf9da91cc4b34cc834ac7e199 WatchSource:0}: Error finding container 627b070f17a7e31bd0b2e6ca715ffb16ea1c16faf9da91cc4b34cc834ac7e199: Status 404 returned error can't find the container with id 627b070f17a7e31bd0b2e6ca715ffb16ea1c16faf9da91cc4b34cc834ac7e199 Sep 30 17:28:31 crc kubenswrapper[4818]: W0930 17:28:31.565398 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee49e41f_da78_4d97_b67c_d44c9bf5ade1.slice/crio-520aefa4e58e2c82fc7e746c4c4cdbdea2b60ed1abf32c7b2168f71d47e2412c WatchSource:0}: Error finding container 520aefa4e58e2c82fc7e746c4c4cdbdea2b60ed1abf32c7b2168f71d47e2412c: Status 404 returned error can't find the container with id 520aefa4e58e2c82fc7e746c4c4cdbdea2b60ed1abf32c7b2168f71d47e2412c Sep 30 17:28:31 crc kubenswrapper[4818]: I0930 17:28:31.571626 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.391494 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"ee49e41f-da78-4d97-b67c-d44c9bf5ade1","Type":"ContainerStarted","Data":"52c8e418537882e7aca3ca5aa72441aee1daa510c514987b06fd07750818a59f"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.391858 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"ee49e41f-da78-4d97-b67c-d44c9bf5ade1","Type":"ContainerStarted","Data":"3a324f67c5c01ed018e09976ac385b7290a02a911ab894955ecd82c36a2af6ac"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.391880 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.391891 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"ee49e41f-da78-4d97-b67c-d44c9bf5ade1","Type":"ContainerStarted","Data":"520aefa4e58e2c82fc7e746c4c4cdbdea2b60ed1abf32c7b2168f71d47e2412c"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.393700 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3ecbd411-02c5-4bed-af60-4bf5eed7d13e","Type":"ContainerStarted","Data":"d705512f9b6d197497ae0aee1a3fd8fae1c187b06bdbb8a01175ce85e38b5726"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.393759 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
event={"ID":"3ecbd411-02c5-4bed-af60-4bf5eed7d13e","Type":"ContainerStarted","Data":"15627b7c622e6af928c974888364635339a7157e2868ec976a570332ba6db562"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.393770 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3ecbd411-02c5-4bed-af60-4bf5eed7d13e","Type":"ContainerStarted","Data":"b9048f5468e7f74c310c4fbbd1fe98fae5dc4f0b3454c1c568ff717a92443f81"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.393997 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.395347 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ff93e567-401d-4642-8878-da000741d567","Type":"ContainerStarted","Data":"177d65fd3d03eb245f0c1c9187b0e95be10db62ad7c82e629218d6afca79d2ff"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.395391 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ff93e567-401d-4642-8878-da000741d567","Type":"ContainerStarted","Data":"27f1cbf1bb36598e010eb8c392db12e839fb41e6d3d94b0aca63a4999dac4ddb"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.396651 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"1b549c07-8dab-4c32-a027-45e710c74e95","Type":"ContainerStarted","Data":"fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.396675 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"1b549c07-8dab-4c32-a027-45e710c74e95","Type":"ContainerStarted","Data":"627b070f17a7e31bd0b2e6ca715ffb16ea1c16faf9da91cc4b34cc834ac7e199"} Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.417437 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-1" podStartSLOduration=2.417421232 podStartE2EDuration="2.417421232s" podCreationTimestamp="2025-09-30 17:28:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:28:32.413218569 +0000 UTC m=+1759.167490385" watchObservedRunningTime="2025-09-30 17:28:32.417421232 +0000 UTC m=+1759.171693048" Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.441349 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.441334299 podStartE2EDuration="2.441334299s" podCreationTimestamp="2025-09-30 17:28:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:28:32.437038783 +0000 UTC m=+1759.191310599" watchObservedRunningTime="2025-09-30 17:28:32.441334299 +0000 UTC m=+1759.195606115" Sep 30 17:28:32 crc kubenswrapper[4818]: I0930 17:28:32.461132 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.461114234 podStartE2EDuration="2.461114234s" podCreationTimestamp="2025-09-30 17:28:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 
17:28:32.453906009 +0000 UTC m=+1759.208177825" watchObservedRunningTime="2025-09-30 17:28:32.461114234 +0000 UTC m=+1759.215386050" Sep 30 17:28:34 crc kubenswrapper[4818]: I0930 17:28:34.563933 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:34 crc kubenswrapper[4818]: I0930 17:28:34.594357 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=4.594336344 podStartE2EDuration="4.594336344s" podCreationTimestamp="2025-09-30 17:28:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:28:32.476339706 +0000 UTC m=+1759.230611522" watchObservedRunningTime="2025-09-30 17:28:34.594336344 +0000 UTC m=+1761.348608160" Sep 30 17:28:34 crc kubenswrapper[4818]: I0930 17:28:34.925380 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:35 crc kubenswrapper[4818]: I0930 17:28:35.895046 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:35 crc kubenswrapper[4818]: I0930 17:28:35.925233 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:35 crc kubenswrapper[4818]: I0930 17:28:35.944363 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:38 crc kubenswrapper[4818]: I0930 17:28:38.020664 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:28:38 crc kubenswrapper[4818]: E0930 17:28:38.021511 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:28:40 crc kubenswrapper[4818]: I0930 17:28:40.894654 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:40 crc kubenswrapper[4818]: I0930 17:28:40.900146 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:40 crc kubenswrapper[4818]: I0930 17:28:40.925720 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:40 crc kubenswrapper[4818]: I0930 17:28:40.930473 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:40 crc kubenswrapper[4818]: I0930 17:28:40.944138 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:40 crc kubenswrapper[4818]: I0930 17:28:40.997024 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:41 crc kubenswrapper[4818]: I0930 17:28:41.002683 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:41 crc kubenswrapper[4818]: I0930 17:28:41.040699 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:41 crc kubenswrapper[4818]: I0930 17:28:41.476594 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:41 crc kubenswrapper[4818]: I0930 17:28:41.481813 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:28:41 crc kubenswrapper[4818]: I0930 17:28:41.484332 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:28:41 crc kubenswrapper[4818]: I0930 17:28:41.512245 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:28:41 crc kubenswrapper[4818]: I0930 17:28:41.533613 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:28:42 crc kubenswrapper[4818]: I0930 17:28:42.816623 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:42 crc kubenswrapper[4818]: I0930 17:28:42.817303 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-central-agent" containerID="cri-o://851326e2127c6cd1652dc683d3cea8187519285118dacaceb44508f0f95d046f" gracePeriod=30 Sep 30 17:28:42 crc kubenswrapper[4818]: I0930 17:28:42.817972 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="00605988-b664-47d9-8a19-104673424799" containerName="proxy-httpd" containerID="cri-o://b338df486816f7c95569235ff11bbc7cef94d34678edf2f55265ddc32d7395a3" gracePeriod=30 Sep 30 17:28:42 crc kubenswrapper[4818]: I0930 17:28:42.818059 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="00605988-b664-47d9-8a19-104673424799" containerName="sg-core" containerID="cri-o://28bc0dbd89daf2ecc9b30c55f7f99b75b7de79877f2eeeaa7d30f356b0f8fe79" gracePeriod=30 Sep 30 17:28:42 crc kubenswrapper[4818]: I0930 17:28:42.818232 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-notification-agent" containerID="cri-o://26bc4509acca485d4a9b51d750068cf9d79b49e59f54669c47145a55ffeee2c2" gracePeriod=30 Sep 30 17:28:42 crc kubenswrapper[4818]: I0930 17:28:42.829610 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="00605988-b664-47d9-8a19-104673424799" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.224:3000/\": EOF" Sep 30 17:28:43 crc kubenswrapper[4818]: I0930 17:28:43.494430 4818 generic.go:334] "Generic (PLEG): container finished" podID="00605988-b664-47d9-8a19-104673424799" containerID="b338df486816f7c95569235ff11bbc7cef94d34678edf2f55265ddc32d7395a3" exitCode=0 Sep 30 17:28:43 crc kubenswrapper[4818]: I0930 17:28:43.494778 4818 generic.go:334] "Generic (PLEG): container finished" 
podID="00605988-b664-47d9-8a19-104673424799" containerID="28bc0dbd89daf2ecc9b30c55f7f99b75b7de79877f2eeeaa7d30f356b0f8fe79" exitCode=2 Sep 30 17:28:43 crc kubenswrapper[4818]: I0930 17:28:43.494795 4818 generic.go:334] "Generic (PLEG): container finished" podID="00605988-b664-47d9-8a19-104673424799" containerID="851326e2127c6cd1652dc683d3cea8187519285118dacaceb44508f0f95d046f" exitCode=0 Sep 30 17:28:43 crc kubenswrapper[4818]: I0930 17:28:43.494483 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerDied","Data":"b338df486816f7c95569235ff11bbc7cef94d34678edf2f55265ddc32d7395a3"} Sep 30 17:28:43 crc kubenswrapper[4818]: I0930 17:28:43.494889 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerDied","Data":"28bc0dbd89daf2ecc9b30c55f7f99b75b7de79877f2eeeaa7d30f356b0f8fe79"} Sep 30 17:28:43 crc kubenswrapper[4818]: I0930 17:28:43.494908 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerDied","Data":"851326e2127c6cd1652dc683d3cea8187519285118dacaceb44508f0f95d046f"} Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.520830 4818 generic.go:334] "Generic (PLEG): container finished" podID="00605988-b664-47d9-8a19-104673424799" containerID="26bc4509acca485d4a9b51d750068cf9d79b49e59f54669c47145a55ffeee2c2" exitCode=0 Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.520867 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerDied","Data":"26bc4509acca485d4a9b51d750068cf9d79b49e59f54669c47145a55ffeee2c2"} Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.826128 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927344 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62v22\" (UniqueName: \"kubernetes.io/projected/00605988-b664-47d9-8a19-104673424799-kube-api-access-62v22\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927406 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-run-httpd\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927443 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-combined-ca-bundle\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927484 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-sg-core-conf-yaml\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927523 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-scripts\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927582 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-config-data\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927621 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-log-httpd\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.927677 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-ceilometer-tls-certs\") pod \"00605988-b664-47d9-8a19-104673424799\" (UID: \"00605988-b664-47d9-8a19-104673424799\") " Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.929021 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.929361 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.936080 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-scripts" (OuterVolumeSpecName: "scripts") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.939133 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00605988-b664-47d9-8a19-104673424799-kube-api-access-62v22" (OuterVolumeSpecName: "kube-api-access-62v22") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "kube-api-access-62v22". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:28:45 crc kubenswrapper[4818]: I0930 17:28:45.953855 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.004639 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.012183 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.029328 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.029372 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62v22\" (UniqueName: \"kubernetes.io/projected/00605988-b664-47d9-8a19-104673424799-kube-api-access-62v22\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.029385 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.029396 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.029406 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.029417 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-scripts\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.029428 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00605988-b664-47d9-8a19-104673424799-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.113046 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-config-data" (OuterVolumeSpecName: "config-data") pod "00605988-b664-47d9-8a19-104673424799" (UID: "00605988-b664-47d9-8a19-104673424799"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.130646 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00605988-b664-47d9-8a19-104673424799-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.532939 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"00605988-b664-47d9-8a19-104673424799","Type":"ContainerDied","Data":"702ad881d57f037930dafa4880ad22aa9a2a250a5f723a45e7929d6a6dd3a83d"} Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.533016 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.533252 4818 scope.go:117] "RemoveContainer" containerID="b338df486816f7c95569235ff11bbc7cef94d34678edf2f55265ddc32d7395a3" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.567836 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.569437 4818 scope.go:117] "RemoveContainer" containerID="28bc0dbd89daf2ecc9b30c55f7f99b75b7de79877f2eeeaa7d30f356b0f8fe79" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.596395 4818 scope.go:117] "RemoveContainer" containerID="26bc4509acca485d4a9b51d750068cf9d79b49e59f54669c47145a55ffeee2c2" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.605137 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.623528 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:46 crc kubenswrapper[4818]: E0930 17:28:46.624003 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00605988-b664-47d9-8a19-104673424799" containerName="proxy-httpd" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624028 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="00605988-b664-47d9-8a19-104673424799" containerName="proxy-httpd" Sep 30 17:28:46 crc kubenswrapper[4818]: E0930 17:28:46.624051 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-central-agent" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624060 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-central-agent" Sep 30 17:28:46 crc kubenswrapper[4818]: E0930 17:28:46.624078 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-notification-agent" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624087 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-notification-agent" Sep 30 17:28:46 crc kubenswrapper[4818]: E0930 17:28:46.624116 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00605988-b664-47d9-8a19-104673424799" containerName="sg-core" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624124 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="00605988-b664-47d9-8a19-104673424799" containerName="sg-core" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624314 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-notification-agent" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624331 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="00605988-b664-47d9-8a19-104673424799" containerName="ceilometer-central-agent" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624347 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="00605988-b664-47d9-8a19-104673424799" containerName="proxy-httpd" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.624367 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="00605988-b664-47d9-8a19-104673424799" containerName="sg-core" Sep 30 17:28:46 crc kubenswrapper[4818]: 
I0930 17:28:46.626555 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.627225 4818 scope.go:117] "RemoveContainer" containerID="851326e2127c6cd1652dc683d3cea8187519285118dacaceb44508f0f95d046f" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.632814 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.634688 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.634949 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.635068 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.744550 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2tsc\" (UniqueName: \"kubernetes.io/projected/38c23344-16fe-45f1-89ad-12a2f30887ef-kube-api-access-l2tsc\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.744623 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-run-httpd\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.744664 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-config-data\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.744743 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-log-httpd\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.744763 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.744848 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-scripts\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.744915 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.745060 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846589 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2tsc\" (UniqueName: \"kubernetes.io/projected/38c23344-16fe-45f1-89ad-12a2f30887ef-kube-api-access-l2tsc\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846638 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-run-httpd\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846682 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-config-data\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846752 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-log-httpd\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846776 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846800 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-scripts\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846825 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.846875 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc 
kubenswrapper[4818]: I0930 17:28:46.848362 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-log-httpd\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.848396 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-run-httpd\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.853665 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.854107 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-config-data\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.854185 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-scripts\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.855132 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.871540 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2tsc\" (UniqueName: \"kubernetes.io/projected/38c23344-16fe-45f1-89ad-12a2f30887ef-kube-api-access-l2tsc\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.873597 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:46 crc kubenswrapper[4818]: I0930 17:28:46.951511 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:47 crc kubenswrapper[4818]: I0930 17:28:47.235756 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:28:47 crc kubenswrapper[4818]: I0930 17:28:47.544976 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerStarted","Data":"30b24e55f31a8b5eb4e5b0c120d546ab8b08d11110e912e7e3066c9cd2f12942"} Sep 30 17:28:48 crc kubenswrapper[4818]: I0930 17:28:48.036947 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00605988-b664-47d9-8a19-104673424799" path="/var/lib/kubelet/pods/00605988-b664-47d9-8a19-104673424799/volumes" Sep 30 17:28:48 crc kubenswrapper[4818]: I0930 17:28:48.562237 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerStarted","Data":"2b5f94b95cbc3e21e5cf766a767c152a9ea2f56b120e6044cdc4c702d3baa716"} Sep 30 17:28:49 crc kubenswrapper[4818]: I0930 17:28:49.571693 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerStarted","Data":"d7bbd4228e98f35969db62f786ca121978d2febb550d0804a2faad2827bf3a3e"} Sep 30 17:28:49 crc kubenswrapper[4818]: I0930 17:28:49.572036 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerStarted","Data":"93d46d3898c0d8c297e93f414cb0dc08dd2b7d9683a681fb103c4cab258e73b4"} Sep 30 17:28:50 crc kubenswrapper[4818]: E0930 17:28:50.634350 4818 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.24:58990->38.102.83.24:43897: read tcp 38.102.83.24:58990->38.102.83.24:43897: read: connection reset by peer Sep 30 17:28:51 crc kubenswrapper[4818]: I0930 17:28:51.020560 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:28:51 crc kubenswrapper[4818]: E0930 17:28:51.020750 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:28:51 crc kubenswrapper[4818]: I0930 17:28:51.600069 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerStarted","Data":"f0116d2a4efb75a8fd31db4a5e91b4676d09117f684e0148be049f98b6c3f9ad"} Sep 30 17:28:51 crc kubenswrapper[4818]: I0930 17:28:51.600844 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:28:51 crc kubenswrapper[4818]: I0930 17:28:51.638240 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.5873392480000001 podStartE2EDuration="5.638203645s" podCreationTimestamp="2025-09-30 17:28:46 +0000 UTC" firstStartedPulling="2025-09-30 17:28:47.253157708 +0000 UTC m=+1774.007429534" 
lastFinishedPulling="2025-09-30 17:28:51.304022075 +0000 UTC m=+1778.058293931" observedRunningTime="2025-09-30 17:28:51.63024066 +0000 UTC m=+1778.384512486" watchObservedRunningTime="2025-09-30 17:28:51.638203645 +0000 UTC m=+1778.392475501" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.137864 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb"] Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.139885 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.141797 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.141797 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-scripts" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.154938 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb"] Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.188733 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjtwv\" (UniqueName: \"kubernetes.io/projected/82821d25-9c13-463b-87f9-94ee1c90ad78-kube-api-access-wjtwv\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.188819 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-config-data\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.188873 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-combined-ca-bundle\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.188898 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-scripts-volume\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.290013 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-combined-ca-bundle\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.290064 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts-volume\" (UniqueName: 
\"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-scripts-volume\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.290125 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjtwv\" (UniqueName: \"kubernetes.io/projected/82821d25-9c13-463b-87f9-94ee1c90ad78-kube-api-access-wjtwv\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.290177 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-config-data\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.304337 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-combined-ca-bundle\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.304350 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-scripts-volume\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.304443 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-config-data\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.316329 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjtwv\" (UniqueName: \"kubernetes.io/projected/82821d25-9c13-463b-87f9-94ee1c90ad78-kube-api-access-wjtwv\") pod \"watcher-kuttl-db-purge-29320889-fkggb\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.459436 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:00 crc kubenswrapper[4818]: I0930 17:29:00.902054 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb"] Sep 30 17:29:01 crc kubenswrapper[4818]: I0930 17:29:01.703770 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" event={"ID":"82821d25-9c13-463b-87f9-94ee1c90ad78","Type":"ContainerStarted","Data":"a1bc811334aed386d9af54366006dbcb983c45143e6154f200489e70256f2bbe"} Sep 30 17:29:01 crc kubenswrapper[4818]: I0930 17:29:01.703810 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" event={"ID":"82821d25-9c13-463b-87f9-94ee1c90ad78","Type":"ContainerStarted","Data":"6b04ebf595cd79ef2400c63f3aa2092fa0b66ec77be126db077f924a2d7c8ac6"} Sep 30 17:29:01 crc kubenswrapper[4818]: I0930 17:29:01.723504 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" podStartSLOduration=1.7234848980000002 podStartE2EDuration="1.723484898s" podCreationTimestamp="2025-09-30 17:29:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:29:01.720888608 +0000 UTC m=+1788.475160444" watchObservedRunningTime="2025-09-30 17:29:01.723484898 +0000 UTC m=+1788.477756714" Sep 30 17:29:03 crc kubenswrapper[4818]: I0930 17:29:03.721019 4818 generic.go:334] "Generic (PLEG): container finished" podID="82821d25-9c13-463b-87f9-94ee1c90ad78" containerID="a1bc811334aed386d9af54366006dbcb983c45143e6154f200489e70256f2bbe" exitCode=0 Sep 30 17:29:03 crc kubenswrapper[4818]: I0930 17:29:03.721102 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" event={"ID":"82821d25-9c13-463b-87f9-94ee1c90ad78","Type":"ContainerDied","Data":"a1bc811334aed386d9af54366006dbcb983c45143e6154f200489e70256f2bbe"} Sep 30 17:29:04 crc kubenswrapper[4818]: I0930 17:29:04.027709 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:29:04 crc kubenswrapper[4818]: E0930 17:29:04.029360 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.150782 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.191498 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-combined-ca-bundle\") pod \"82821d25-9c13-463b-87f9-94ee1c90ad78\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.191578 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-scripts-volume\") pod \"82821d25-9c13-463b-87f9-94ee1c90ad78\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.191695 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjtwv\" (UniqueName: \"kubernetes.io/projected/82821d25-9c13-463b-87f9-94ee1c90ad78-kube-api-access-wjtwv\") pod \"82821d25-9c13-463b-87f9-94ee1c90ad78\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.191776 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-config-data\") pod \"82821d25-9c13-463b-87f9-94ee1c90ad78\" (UID: \"82821d25-9c13-463b-87f9-94ee1c90ad78\") " Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.208970 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-scripts-volume" (OuterVolumeSpecName: "scripts-volume") pod "82821d25-9c13-463b-87f9-94ee1c90ad78" (UID: "82821d25-9c13-463b-87f9-94ee1c90ad78"). InnerVolumeSpecName "scripts-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.210004 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82821d25-9c13-463b-87f9-94ee1c90ad78-kube-api-access-wjtwv" (OuterVolumeSpecName: "kube-api-access-wjtwv") pod "82821d25-9c13-463b-87f9-94ee1c90ad78" (UID: "82821d25-9c13-463b-87f9-94ee1c90ad78"). InnerVolumeSpecName "kube-api-access-wjtwv". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.223670 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82821d25-9c13-463b-87f9-94ee1c90ad78" (UID: "82821d25-9c13-463b-87f9-94ee1c90ad78"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.245970 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-config-data" (OuterVolumeSpecName: "config-data") pod "82821d25-9c13-463b-87f9-94ee1c90ad78" (UID: "82821d25-9c13-463b-87f9-94ee1c90ad78"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.294260 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.294303 4818 reconciler_common.go:293] "Volume detached for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-scripts-volume\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.294317 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjtwv\" (UniqueName: \"kubernetes.io/projected/82821d25-9c13-463b-87f9-94ee1c90ad78-kube-api-access-wjtwv\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.294334 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82821d25-9c13-463b-87f9-94ee1c90ad78-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.741374 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" event={"ID":"82821d25-9c13-463b-87f9-94ee1c90ad78","Type":"ContainerDied","Data":"6b04ebf595cd79ef2400c63f3aa2092fa0b66ec77be126db077f924a2d7c8ac6"} Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.741419 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b04ebf595cd79ef2400c63f3aa2092fa0b66ec77be126db077f924a2d7c8ac6" Sep 30 17:29:05 crc kubenswrapper[4818]: I0930 17:29:05.741424 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.236024 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.243296 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-lh9lx"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.256729 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.262600 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29320889-fkggb"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.286476 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-n64q5"] Sep 30 17:29:09 crc kubenswrapper[4818]: E0930 17:29:09.286963 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82821d25-9c13-463b-87f9-94ee1c90ad78" containerName="watcher-db-manage" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.286985 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="82821d25-9c13-463b-87f9-94ee1c90ad78" containerName="watcher-db-manage" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.287218 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="82821d25-9c13-463b-87f9-94ee1c90ad78" containerName="watcher-db-manage" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.287995 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-n64q5" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.298043 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-n64q5"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.357942 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.358186 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="1b549c07-8dab-4c32-a027-45e710c74e95" containerName="watcher-applier" containerID="cri-o://fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc" gracePeriod=30 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.360348 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mb6p\" (UniqueName: \"kubernetes.io/projected/689f3625-d798-4303-bf54-5c5eac457e73-kube-api-access-8mb6p\") pod \"watchertest-account-delete-n64q5\" (UID: \"689f3625-d798-4303-bf54-5c5eac457e73\") " pod="watcher-kuttl-default/watchertest-account-delete-n64q5" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.383230 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.383512 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-kuttl-api-log" containerID="cri-o://15627b7c622e6af928c974888364635339a7157e2868ec976a570332ba6db562" gracePeriod=30 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.383651 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-api" containerID="cri-o://d705512f9b6d197497ae0aee1a3fd8fae1c187b06bdbb8a01175ce85e38b5726" gracePeriod=30 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.393183 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.394005 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-kuttl-api-log" containerID="cri-o://3a324f67c5c01ed018e09976ac385b7290a02a911ab894955ecd82c36a2af6ac" gracePeriod=30 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.394692 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-api" containerID="cri-o://52c8e418537882e7aca3ca5aa72441aee1daa510c514987b06fd07750818a59f" gracePeriod=30 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.431063 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.431880 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="ff93e567-401d-4642-8878-da000741d567" containerName="watcher-decision-engine" 
containerID="cri-o://177d65fd3d03eb245f0c1c9187b0e95be10db62ad7c82e629218d6afca79d2ff" gracePeriod=30 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.468225 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mb6p\" (UniqueName: \"kubernetes.io/projected/689f3625-d798-4303-bf54-5c5eac457e73-kube-api-access-8mb6p\") pod \"watchertest-account-delete-n64q5\" (UID: \"689f3625-d798-4303-bf54-5c5eac457e73\") " pod="watcher-kuttl-default/watchertest-account-delete-n64q5" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.496326 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mb6p\" (UniqueName: \"kubernetes.io/projected/689f3625-d798-4303-bf54-5c5eac457e73-kube-api-access-8mb6p\") pod \"watchertest-account-delete-n64q5\" (UID: \"689f3625-d798-4303-bf54-5c5eac457e73\") " pod="watcher-kuttl-default/watchertest-account-delete-n64q5" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.611056 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-n64q5" Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.803963 4818 generic.go:334] "Generic (PLEG): container finished" podID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerID="3a324f67c5c01ed018e09976ac385b7290a02a911ab894955ecd82c36a2af6ac" exitCode=143 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.804151 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"ee49e41f-da78-4d97-b67c-d44c9bf5ade1","Type":"ContainerDied","Data":"3a324f67c5c01ed018e09976ac385b7290a02a911ab894955ecd82c36a2af6ac"} Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.828221 4818 generic.go:334] "Generic (PLEG): container finished" podID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerID="15627b7c622e6af928c974888364635339a7157e2868ec976a570332ba6db562" exitCode=143 Sep 30 17:29:09 crc kubenswrapper[4818]: I0930 17:29:09.828273 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3ecbd411-02c5-4bed-af60-4bf5eed7d13e","Type":"ContainerDied","Data":"15627b7c622e6af928c974888364635339a7157e2868ec976a570332ba6db562"} Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.030549 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="153cf1c8-ebf2-4dee-a348-509aa5037630" path="/var/lib/kubelet/pods/153cf1c8-ebf2-4dee-a348-509aa5037630/volumes" Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.031063 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82821d25-9c13-463b-87f9-94ee1c90ad78" path="/var/lib/kubelet/pods/82821d25-9c13-463b-87f9-94ee1c90ad78/volumes" Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.169786 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-n64q5"] Sep 30 17:29:10 crc kubenswrapper[4818]: W0930 17:29:10.172396 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod689f3625_d798_4303_bf54_5c5eac457e73.slice/crio-3564d9a12ffab2de50d42bd5b37c5dc347e76defab9ef3cc66f25afe05005f9b WatchSource:0}: Error finding container 3564d9a12ffab2de50d42bd5b37c5dc347e76defab9ef3cc66f25afe05005f9b: Status 404 returned error can't find the container with id 3564d9a12ffab2de50d42bd5b37c5dc347e76defab9ef3cc66f25afe05005f9b Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 
17:29:10.867468 4818 generic.go:334] "Generic (PLEG): container finished" podID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerID="52c8e418537882e7aca3ca5aa72441aee1daa510c514987b06fd07750818a59f" exitCode=0 Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.867531 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"ee49e41f-da78-4d97-b67c-d44c9bf5ade1","Type":"ContainerDied","Data":"52c8e418537882e7aca3ca5aa72441aee1daa510c514987b06fd07750818a59f"} Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.867878 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"ee49e41f-da78-4d97-b67c-d44c9bf5ade1","Type":"ContainerDied","Data":"520aefa4e58e2c82fc7e746c4c4cdbdea2b60ed1abf32c7b2168f71d47e2412c"} Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.867893 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="520aefa4e58e2c82fc7e746c4c4cdbdea2b60ed1abf32c7b2168f71d47e2412c" Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.869665 4818 generic.go:334] "Generic (PLEG): container finished" podID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerID="d705512f9b6d197497ae0aee1a3fd8fae1c187b06bdbb8a01175ce85e38b5726" exitCode=0 Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.869722 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3ecbd411-02c5-4bed-af60-4bf5eed7d13e","Type":"ContainerDied","Data":"d705512f9b6d197497ae0aee1a3fd8fae1c187b06bdbb8a01175ce85e38b5726"} Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.872095 4818 generic.go:334] "Generic (PLEG): container finished" podID="689f3625-d798-4303-bf54-5c5eac457e73" containerID="0f6a05764849702bd462c38b716c0e14ab93d2934472600e1de818f1556d5489" exitCode=0 Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.872153 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchertest-account-delete-n64q5" event={"ID":"689f3625-d798-4303-bf54-5c5eac457e73","Type":"ContainerDied","Data":"0f6a05764849702bd462c38b716c0e14ab93d2934472600e1de818f1556d5489"} Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.872174 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchertest-account-delete-n64q5" event={"ID":"689f3625-d798-4303-bf54-5c5eac457e73","Type":"ContainerStarted","Data":"3564d9a12ffab2de50d42bd5b37c5dc347e76defab9ef3cc66f25afe05005f9b"} Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.878521 4818 generic.go:334] "Generic (PLEG): container finished" podID="ff93e567-401d-4642-8878-da000741d567" containerID="177d65fd3d03eb245f0c1c9187b0e95be10db62ad7c82e629218d6afca79d2ff" exitCode=0 Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.878572 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ff93e567-401d-4642-8878-da000741d567","Type":"ContainerDied","Data":"177d65fd3d03eb245f0c1c9187b0e95be10db62ad7c82e629218d6afca79d2ff"} Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.879668 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:29:10 crc kubenswrapper[4818]: E0930 17:29:10.946713 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:29:10 crc kubenswrapper[4818]: E0930 17:29:10.948494 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:29:10 crc kubenswrapper[4818]: E0930 17:29:10.951057 4818 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 30 17:29:10 crc kubenswrapper[4818]: E0930 17:29:10.951093 4818 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="1b549c07-8dab-4c32-a027-45e710c74e95" containerName="watcher-applier" Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.999422 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-config-data\") pod \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.999498 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-logs\") pod \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.999568 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-cert-memcached-mtls\") pod \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.999602 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-combined-ca-bundle\") pod \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.999671 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-custom-prometheus-ca\") pod \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " Sep 30 17:29:10 crc kubenswrapper[4818]: I0930 17:29:10.999717 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-c95wx\" (UniqueName: \"kubernetes.io/projected/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-kube-api-access-c95wx\") pod \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\" (UID: \"ee49e41f-da78-4d97-b67c-d44c9bf5ade1\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.003028 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-logs" (OuterVolumeSpecName: "logs") pod "ee49e41f-da78-4d97-b67c-d44c9bf5ade1" (UID: "ee49e41f-da78-4d97-b67c-d44c9bf5ade1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.008153 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-kube-api-access-c95wx" (OuterVolumeSpecName: "kube-api-access-c95wx") pod "ee49e41f-da78-4d97-b67c-d44c9bf5ade1" (UID: "ee49e41f-da78-4d97-b67c-d44c9bf5ade1"). InnerVolumeSpecName "kube-api-access-c95wx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.038342 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "ee49e41f-da78-4d97-b67c-d44c9bf5ade1" (UID: "ee49e41f-da78-4d97-b67c-d44c9bf5ade1"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.055822 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.063134 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-config-data" (OuterVolumeSpecName: "config-data") pod "ee49e41f-da78-4d97-b67c-d44c9bf5ade1" (UID: "ee49e41f-da78-4d97-b67c-d44c9bf5ade1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.063529 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee49e41f-da78-4d97-b67c-d44c9bf5ade1" (UID: "ee49e41f-da78-4d97-b67c-d44c9bf5ade1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.080124 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.111688 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-combined-ca-bundle\") pod \"ff93e567-401d-4642-8878-da000741d567\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.111755 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-config-data\") pod \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.111799 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-cert-memcached-mtls\") pod \"ff93e567-401d-4642-8878-da000741d567\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.111832 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff93e567-401d-4642-8878-da000741d567-logs\") pod \"ff93e567-401d-4642-8878-da000741d567\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.111852 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-cert-memcached-mtls\") pod \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.111899 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-logs\") pod \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.111948 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-db42r\" (UniqueName: \"kubernetes.io/projected/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-kube-api-access-db42r\") pod \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112016 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-config-data\") pod \"ff93e567-401d-4642-8878-da000741d567\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112036 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-combined-ca-bundle\") pod \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112064 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-custom-prometheus-ca\") pod 
\"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\" (UID: \"3ecbd411-02c5-4bed-af60-4bf5eed7d13e\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112099 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjmch\" (UniqueName: \"kubernetes.io/projected/ff93e567-401d-4642-8878-da000741d567-kube-api-access-tjmch\") pod \"ff93e567-401d-4642-8878-da000741d567\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112119 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-custom-prometheus-ca\") pod \"ff93e567-401d-4642-8878-da000741d567\" (UID: \"ff93e567-401d-4642-8878-da000741d567\") " Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112479 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112496 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c95wx\" (UniqueName: \"kubernetes.io/projected/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-kube-api-access-c95wx\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112507 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112516 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.112524 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.125908 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff93e567-401d-4642-8878-da000741d567-logs" (OuterVolumeSpecName: "logs") pod "ff93e567-401d-4642-8878-da000741d567" (UID: "ff93e567-401d-4642-8878-da000741d567"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.126254 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-logs" (OuterVolumeSpecName: "logs") pod "3ecbd411-02c5-4bed-af60-4bf5eed7d13e" (UID: "3ecbd411-02c5-4bed-af60-4bf5eed7d13e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.126576 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff93e567-401d-4642-8878-da000741d567-kube-api-access-tjmch" (OuterVolumeSpecName: "kube-api-access-tjmch") pod "ff93e567-401d-4642-8878-da000741d567" (UID: "ff93e567-401d-4642-8878-da000741d567"). InnerVolumeSpecName "kube-api-access-tjmch". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.138241 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-kube-api-access-db42r" (OuterVolumeSpecName: "kube-api-access-db42r") pod "3ecbd411-02c5-4bed-af60-4bf5eed7d13e" (UID: "3ecbd411-02c5-4bed-af60-4bf5eed7d13e"). InnerVolumeSpecName "kube-api-access-db42r". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.140901 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "ee49e41f-da78-4d97-b67c-d44c9bf5ade1" (UID: "ee49e41f-da78-4d97-b67c-d44c9bf5ade1"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.145519 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "3ecbd411-02c5-4bed-af60-4bf5eed7d13e" (UID: "3ecbd411-02c5-4bed-af60-4bf5eed7d13e"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.152977 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff93e567-401d-4642-8878-da000741d567" (UID: "ff93e567-401d-4642-8878-da000741d567"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.172740 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ecbd411-02c5-4bed-af60-4bf5eed7d13e" (UID: "3ecbd411-02c5-4bed-af60-4bf5eed7d13e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.201396 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "ff93e567-401d-4642-8878-da000741d567" (UID: "ff93e567-401d-4642-8878-da000741d567"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.202014 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-config-data" (OuterVolumeSpecName: "config-data") pod "3ecbd411-02c5-4bed-af60-4bf5eed7d13e" (UID: "3ecbd411-02c5-4bed-af60-4bf5eed7d13e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213881 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff93e567-401d-4642-8878-da000741d567-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213907 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213915 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-db42r\" (UniqueName: \"kubernetes.io/projected/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-kube-api-access-db42r\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213950 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ee49e41f-da78-4d97-b67c-d44c9bf5ade1-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213958 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213966 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213974 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjmch\" (UniqueName: \"kubernetes.io/projected/ff93e567-401d-4642-8878-da000741d567-kube-api-access-tjmch\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213982 4818 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213990 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.213997 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.221434 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "3ecbd411-02c5-4bed-af60-4bf5eed7d13e" (UID: "3ecbd411-02c5-4bed-af60-4bf5eed7d13e"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.221534 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-config-data" (OuterVolumeSpecName: "config-data") pod "ff93e567-401d-4642-8878-da000741d567" (UID: "ff93e567-401d-4642-8878-da000741d567"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.223731 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "ff93e567-401d-4642-8878-da000741d567" (UID: "ff93e567-401d-4642-8878-da000741d567"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.315688 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.315917 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/ff93e567-401d-4642-8878-da000741d567-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.316059 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ecbd411-02c5-4bed-af60-4bf5eed7d13e-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.888965 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.889257 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"ff93e567-401d-4642-8878-da000741d567","Type":"ContainerDied","Data":"27f1cbf1bb36598e010eb8c392db12e839fb41e6d3d94b0aca63a4999dac4ddb"} Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.890159 4818 scope.go:117] "RemoveContainer" containerID="177d65fd3d03eb245f0c1c9187b0e95be10db62ad7c82e629218d6afca79d2ff" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.891562 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"3ecbd411-02c5-4bed-af60-4bf5eed7d13e","Type":"ContainerDied","Data":"b9048f5468e7f74c310c4fbbd1fe98fae5dc4f0b3454c1c568ff717a92443f81"} Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.891583 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.891903 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.956322 4818 scope.go:117] "RemoveContainer" containerID="d705512f9b6d197497ae0aee1a3fd8fae1c187b06bdbb8a01175ce85e38b5726" Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.960610 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.970798 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.985311 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.985967 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.986099 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:29:11 crc kubenswrapper[4818]: I0930 17:29:11.987356 4818 scope.go:117] "RemoveContainer" containerID="15627b7c622e6af928c974888364635339a7157e2868ec976a570332ba6db562" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.000069 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.068256 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" path="/var/lib/kubelet/pods/3ecbd411-02c5-4bed-af60-4bf5eed7d13e/volumes" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.069203 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" path="/var/lib/kubelet/pods/ee49e41f-da78-4d97-b67c-d44c9bf5ade1/volumes" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.069882 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff93e567-401d-4642-8878-da000741d567" path="/var/lib/kubelet/pods/ff93e567-401d-4642-8878-da000741d567/volumes" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.250509 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-n64q5" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.281498 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.281745 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-central-agent" containerID="cri-o://2b5f94b95cbc3e21e5cf766a767c152a9ea2f56b120e6044cdc4c702d3baa716" gracePeriod=30 Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.282414 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="proxy-httpd" containerID="cri-o://f0116d2a4efb75a8fd31db4a5e91b4676d09117f684e0148be049f98b6c3f9ad" gracePeriod=30 Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.282472 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="sg-core" containerID="cri-o://d7bbd4228e98f35969db62f786ca121978d2febb550d0804a2faad2827bf3a3e" gracePeriod=30 Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.282504 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-notification-agent" containerID="cri-o://93d46d3898c0d8c297e93f414cb0dc08dd2b7d9683a681fb103c4cab258e73b4" gracePeriod=30 Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.314982 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.231:3000/\": EOF" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.342153 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mb6p\" (UniqueName: \"kubernetes.io/projected/689f3625-d798-4303-bf54-5c5eac457e73-kube-api-access-8mb6p\") pod \"689f3625-d798-4303-bf54-5c5eac457e73\" (UID: \"689f3625-d798-4303-bf54-5c5eac457e73\") " Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.345972 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/689f3625-d798-4303-bf54-5c5eac457e73-kube-api-access-8mb6p" (OuterVolumeSpecName: "kube-api-access-8mb6p") pod "689f3625-d798-4303-bf54-5c5eac457e73" (UID: "689f3625-d798-4303-bf54-5c5eac457e73"). InnerVolumeSpecName "kube-api-access-8mb6p". 
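[editor's note] The four "Killing container with a grace period" lines above all carry gracePeriod=30: the runtime first delivers SIGTERM and only escalates to SIGKILL if the process is still alive when the grace period expires. A condensed sketch of that sequence against an ordinary local process (assumes a process that exits on SIGTERM; this is not CRI-O code):

package main

import (
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "300") // stand-in for the container's main process
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	cmd.Process.Signal(syscall.SIGTERM) // polite shutdown request
	select {
	case <-done:
		// exited within the grace period: nothing more to do
	case <-time.After(30 * time.Second): // gracePeriod=30, as in the log
		cmd.Process.Kill() // escalate to SIGKILL
		<-done
	}
}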
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.443724 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mb6p\" (UniqueName: \"kubernetes.io/projected/689f3625-d798-4303-bf54-5c5eac457e73-kube-api-access-8mb6p\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.901785 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchertest-account-delete-n64q5" event={"ID":"689f3625-d798-4303-bf54-5c5eac457e73","Type":"ContainerDied","Data":"3564d9a12ffab2de50d42bd5b37c5dc347e76defab9ef3cc66f25afe05005f9b"} Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.901829 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3564d9a12ffab2de50d42bd5b37c5dc347e76defab9ef3cc66f25afe05005f9b" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.901840 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-n64q5" Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.904575 4818 generic.go:334] "Generic (PLEG): container finished" podID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerID="f0116d2a4efb75a8fd31db4a5e91b4676d09117f684e0148be049f98b6c3f9ad" exitCode=0 Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.904597 4818 generic.go:334] "Generic (PLEG): container finished" podID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerID="d7bbd4228e98f35969db62f786ca121978d2febb550d0804a2faad2827bf3a3e" exitCode=2 Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.904605 4818 generic.go:334] "Generic (PLEG): container finished" podID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerID="2b5f94b95cbc3e21e5cf766a767c152a9ea2f56b120e6044cdc4c702d3baa716" exitCode=0 Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.904640 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerDied","Data":"f0116d2a4efb75a8fd31db4a5e91b4676d09117f684e0148be049f98b6c3f9ad"} Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.904655 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerDied","Data":"d7bbd4228e98f35969db62f786ca121978d2febb550d0804a2faad2827bf3a3e"} Sep 30 17:29:12 crc kubenswrapper[4818]: I0930 17:29:12.904665 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerDied","Data":"2b5f94b95cbc3e21e5cf766a767c152a9ea2f56b120e6044cdc4c702d3baa716"} Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.308096 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-5g6kd"] Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.314335 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-5g6kd"] Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.320017 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-n64q5"] Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.325620 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-5nf96"] Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.331019 4818 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-n64q5"] Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.336772 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-5nf96"] Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.925348 4818 generic.go:334] "Generic (PLEG): container finished" podID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerID="93d46d3898c0d8c297e93f414cb0dc08dd2b7d9683a681fb103c4cab258e73b4" exitCode=0 Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.925535 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerDied","Data":"93d46d3898c0d8c297e93f414cb0dc08dd2b7d9683a681fb103c4cab258e73b4"} Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.927192 4818 generic.go:334] "Generic (PLEG): container finished" podID="1b549c07-8dab-4c32-a027-45e710c74e95" containerID="fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc" exitCode=0 Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.927226 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"1b549c07-8dab-4c32-a027-45e710c74e95","Type":"ContainerDied","Data":"fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc"} Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.927249 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"1b549c07-8dab-4c32-a027-45e710c74e95","Type":"ContainerDied","Data":"627b070f17a7e31bd0b2e6ca715ffb16ea1c16faf9da91cc4b34cc834ac7e199"} Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.927263 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="627b070f17a7e31bd0b2e6ca715ffb16ea1c16faf9da91cc4b34cc834ac7e199" Sep 30 17:29:14 crc kubenswrapper[4818]: I0930 17:29:14.963043 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.084559 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b549c07-8dab-4c32-a027-45e710c74e95-logs\") pod \"1b549c07-8dab-4c32-a027-45e710c74e95\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.084667 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-config-data\") pod \"1b549c07-8dab-4c32-a027-45e710c74e95\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.084811 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-cert-memcached-mtls\") pod \"1b549c07-8dab-4c32-a027-45e710c74e95\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.084871 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-combined-ca-bundle\") pod \"1b549c07-8dab-4c32-a027-45e710c74e95\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.084905 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b549c07-8dab-4c32-a027-45e710c74e95-logs" (OuterVolumeSpecName: "logs") pod "1b549c07-8dab-4c32-a027-45e710c74e95" (UID: "1b549c07-8dab-4c32-a027-45e710c74e95"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.084945 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nh7vj\" (UniqueName: \"kubernetes.io/projected/1b549c07-8dab-4c32-a027-45e710c74e95-kube-api-access-nh7vj\") pod \"1b549c07-8dab-4c32-a027-45e710c74e95\" (UID: \"1b549c07-8dab-4c32-a027-45e710c74e95\") " Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.085401 4818 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b549c07-8dab-4c32-a027-45e710c74e95-logs\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.104261 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b549c07-8dab-4c32-a027-45e710c74e95-kube-api-access-nh7vj" (OuterVolumeSpecName: "kube-api-access-nh7vj") pod "1b549c07-8dab-4c32-a027-45e710c74e95" (UID: "1b549c07-8dab-4c32-a027-45e710c74e95"). InnerVolumeSpecName "kube-api-access-nh7vj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.119199 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b549c07-8dab-4c32-a027-45e710c74e95" (UID: "1b549c07-8dab-4c32-a027-45e710c74e95"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.131536 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-config-data" (OuterVolumeSpecName: "config-data") pod "1b549c07-8dab-4c32-a027-45e710c74e95" (UID: "1b549c07-8dab-4c32-a027-45e710c74e95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.182764 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "1b549c07-8dab-4c32-a027-45e710c74e95" (UID: "1b549c07-8dab-4c32-a027-45e710c74e95"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.186836 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.186879 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nh7vj\" (UniqueName: \"kubernetes.io/projected/1b549c07-8dab-4c32-a027-45e710c74e95-kube-api-access-nh7vj\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.186897 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-config-data\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.186908 4818 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/1b549c07-8dab-4c32-a027-45e710c74e95-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.243188 4818 util.go:48] "No ready sandbox for pod can be found. 
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.287717 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-sg-core-conf-yaml\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.287767 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2tsc\" (UniqueName: \"kubernetes.io/projected/38c23344-16fe-45f1-89ad-12a2f30887ef-kube-api-access-l2tsc\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.287845 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-config-data\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.288451 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-run-httpd\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.288481 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-combined-ca-bundle\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.288512 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-scripts\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.288538 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-ceilometer-tls-certs\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.288585 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-log-httpd\") pod \"38c23344-16fe-45f1-89ad-12a2f30887ef\" (UID: \"38c23344-16fe-45f1-89ad-12a2f30887ef\") "
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.289302 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.289874 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.291221 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38c23344-16fe-45f1-89ad-12a2f30887ef-kube-api-access-l2tsc" (OuterVolumeSpecName: "kube-api-access-l2tsc") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "kube-api-access-l2tsc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.294217 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-scripts" (OuterVolumeSpecName: "scripts") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.318127 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.333802 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.354061 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.372267 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-config-data" (OuterVolumeSpecName: "config-data") pod "38c23344-16fe-45f1-89ad-12a2f30887ef" (UID: "38c23344-16fe-45f1-89ad-12a2f30887ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390261 4818 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390295 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2tsc\" (UniqueName: \"kubernetes.io/projected/38c23344-16fe-45f1-89ad-12a2f30887ef-kube-api-access-l2tsc\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390315 4818 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-config-data\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390330 4818 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390342 4818 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390353 4818 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-scripts\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390363 4818 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38c23344-16fe-45f1-89ad-12a2f30887ef-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.390371 4818 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38c23344-16fe-45f1-89ad-12a2f30887ef-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.895021 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.227:9322/\": dial tcp 10.217.0.227:9322: i/o timeout (Client.Timeout exceeded while awaiting headers)"
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.895530 4818 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.227:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.942741 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.942759 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"38c23344-16fe-45f1-89ad-12a2f30887ef","Type":"ContainerDied","Data":"30b24e55f31a8b5eb4e5b0c120d546ab8b08d11110e912e7e3066c9cd2f12942"}
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.942877 4818 scope.go:117] "RemoveContainer" containerID="f0116d2a4efb75a8fd31db4a5e91b4676d09117f684e0148be049f98b6c3f9ad"
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.942797 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:15 crc kubenswrapper[4818]: I0930 17:29:15.988788 4818 scope.go:117] "RemoveContainer" containerID="d7bbd4228e98f35969db62f786ca121978d2febb550d0804a2faad2827bf3a3e"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.010148 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.025778 4818 scope.go:117] "RemoveContainer" containerID="93d46d3898c0d8c297e93f414cb0dc08dd2b7d9683a681fb103c4cab258e73b4"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.038322 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29b3cda2-f43c-40cb-b6b0-8f849847cafa" path="/var/lib/kubelet/pods/29b3cda2-f43c-40cb-b6b0-8f849847cafa/volumes"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.039469 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc" path="/var/lib/kubelet/pods/5a6fd6d6-781a-4ef6-8fca-c6f8619c50fc/volumes"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.040769 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="689f3625-d798-4303-bf54-5c5eac457e73" path="/var/lib/kubelet/pods/689f3625-d798-4303-bf54-5c5eac457e73/volumes"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.041819 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.043087 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.056004 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.060497 4818 scope.go:117] "RemoveContainer" containerID="2b5f94b95cbc3e21e5cf766a767c152a9ea2f56b120e6044cdc4c702d3baa716"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.063119 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.063648 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b549c07-8dab-4c32-a027-45e710c74e95" containerName="watcher-applier"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.063739 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b549c07-8dab-4c32-a027-45e710c74e95" containerName="watcher-applier"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.063813 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-central-agent"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.063883 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-central-agent"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.063991 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-notification-agent"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.064108 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-notification-agent"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.064208 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-api"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.064281 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-api"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.064361 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff93e567-401d-4642-8878-da000741d567" containerName="watcher-decision-engine"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.064431 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff93e567-401d-4642-8878-da000741d567" containerName="watcher-decision-engine"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.064517 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="sg-core"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.064587 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="sg-core"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.064662 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="689f3625-d798-4303-bf54-5c5eac457e73" containerName="mariadb-account-delete"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.064738 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="689f3625-d798-4303-bf54-5c5eac457e73" containerName="mariadb-account-delete"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.064817 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-kuttl-api-log"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.064887 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-kuttl-api-log"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.064984 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-api"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065058 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-api"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.065136 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-kuttl-api-log"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065224 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-kuttl-api-log"
Sep 30 17:29:16 crc kubenswrapper[4818]: E0930 17:29:16.065302 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="proxy-httpd"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065372 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="proxy-httpd"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065611 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b549c07-8dab-4c32-a027-45e710c74e95" containerName="watcher-applier"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065700 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="689f3625-d798-4303-bf54-5c5eac457e73" containerName="mariadb-account-delete"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065781 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-api"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065861 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="proxy-httpd"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.065960 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff93e567-401d-4642-8878-da000741d567" containerName="watcher-decision-engine"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.066037 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-api"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.066123 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-central-agent"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.066211 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ecbd411-02c5-4bed-af60-4bf5eed7d13e" containerName="watcher-kuttl-api-log"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.066287 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="ceilometer-notification-agent"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.066372 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" containerName="sg-core"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.066469 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee49e41f-da78-4d97-b67c-d44c9bf5ade1" containerName="watcher-kuttl-api-log"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.069405 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.072684 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.072761 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.073420 4818 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.084203 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205010 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/907c0b05-3239-4941-b998-a132f9a12339-run-httpd\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205138 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205166 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/907c0b05-3239-4941-b998-a132f9a12339-log-httpd\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205200 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-scripts\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205228 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-config-data\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205251 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205302 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2hbp\" (UniqueName: \"kubernetes.io/projected/907c0b05-3239-4941-b998-a132f9a12339-kube-api-access-j2hbp\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.205336 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307038 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-scripts\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307103 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-config-data\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307129 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307171 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2hbp\" (UniqueName: \"kubernetes.io/projected/907c0b05-3239-4941-b998-a132f9a12339-kube-api-access-j2hbp\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307192 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307207 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/907c0b05-3239-4941-b998-a132f9a12339-run-httpd\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307264 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307282 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/907c0b05-3239-4941-b998-a132f9a12339-log-httpd\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.307791 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/907c0b05-3239-4941-b998-a132f9a12339-log-httpd\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0"
pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.308024 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/907c0b05-3239-4941-b998-a132f9a12339-run-httpd\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.312573 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.313595 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.314030 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-scripts\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.324105 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2hbp\" (UniqueName: \"kubernetes.io/projected/907c0b05-3239-4941-b998-a132f9a12339-kube-api-access-j2hbp\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.324320 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-config-data\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.326323 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/907c0b05-3239-4941-b998-a132f9a12339-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"907c0b05-3239-4941-b998-a132f9a12339\") " pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.399365 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.853576 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Sep 30 17:29:16 crc kubenswrapper[4818]: W0930 17:29:16.857723 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod907c0b05_3239_4941_b998_a132f9a12339.slice/crio-cccf7887f0ce45ff450031ad05189842b58f399927b03bace6c91ec4c8c554b3 WatchSource:0}: Error finding container cccf7887f0ce45ff450031ad05189842b58f399927b03bace6c91ec4c8c554b3: Status 404 returned error can't find the container with id cccf7887f0ce45ff450031ad05189842b58f399927b03bace6c91ec4c8c554b3 Sep 30 17:29:16 crc kubenswrapper[4818]: I0930 17:29:16.952167 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"907c0b05-3239-4941-b998-a132f9a12339","Type":"ContainerStarted","Data":"cccf7887f0ce45ff450031ad05189842b58f399927b03bace6c91ec4c8c554b3"} Sep 30 17:29:17 crc kubenswrapper[4818]: I0930 17:29:17.961363 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"907c0b05-3239-4941-b998-a132f9a12339","Type":"ContainerStarted","Data":"9d157bdbc0d605c3d637c5d35a7b2c6178409d3a570ec51f793fd8be40c1208c"} Sep 30 17:29:18 crc kubenswrapper[4818]: I0930 17:29:18.032669 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:29:18 crc kubenswrapper[4818]: E0930 17:29:18.033620 4818 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vc6ss_openshift-machine-config-operator(5e908152-dcb2-4b41-974d-26b03ae0254b)\"" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" Sep 30 17:29:18 crc kubenswrapper[4818]: I0930 17:29:18.052572 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b549c07-8dab-4c32-a027-45e710c74e95" path="/var/lib/kubelet/pods/1b549c07-8dab-4c32-a027-45e710c74e95/volumes" Sep 30 17:29:18 crc kubenswrapper[4818]: I0930 17:29:18.053630 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38c23344-16fe-45f1-89ad-12a2f30887ef" path="/var/lib/kubelet/pods/38c23344-16fe-45f1-89ad-12a2f30887ef/volumes" Sep 30 17:29:18 crc kubenswrapper[4818]: I0930 17:29:18.972614 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"907c0b05-3239-4941-b998-a132f9a12339","Type":"ContainerStarted","Data":"23a18cefed345a94a89ef6c3ca3b59f957c83cc29d36e2e9a57da6fee037b71a"} Sep 30 17:29:18 crc kubenswrapper[4818]: I0930 17:29:18.972909 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"907c0b05-3239-4941-b998-a132f9a12339","Type":"ContainerStarted","Data":"3657cd9a99b612588482440841999ddbf2f83a204ef2076ced4f2d330afbc97a"} Sep 30 17:29:22 crc kubenswrapper[4818]: I0930 17:29:21.999485 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"907c0b05-3239-4941-b998-a132f9a12339","Type":"ContainerStarted","Data":"fdae45c35dc7267a2d703454e7db6b3dff56dbe70b0eefbdb3d394ad4ac8ca8a"} Sep 30 17:29:22 crc kubenswrapper[4818]: I0930 
17:29:22.000188 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:22 crc kubenswrapper[4818]: I0930 17:29:22.047475 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.812631602 podStartE2EDuration="6.047457676s" podCreationTimestamp="2025-09-30 17:29:16 +0000 UTC" firstStartedPulling="2025-09-30 17:29:16.860605587 +0000 UTC m=+1803.614877403" lastFinishedPulling="2025-09-30 17:29:21.095431661 +0000 UTC m=+1807.849703477" observedRunningTime="2025-09-30 17:29:22.04243261 +0000 UTC m=+1808.796704426" watchObservedRunningTime="2025-09-30 17:29:22.047457676 +0000 UTC m=+1808.801729482" Sep 30 17:29:23 crc kubenswrapper[4818]: I0930 17:29:23.278850 4818 scope.go:117] "RemoveContainer" containerID="91564a2a2a8d30169140af78aa7b5e5655280aadb6056c32531dbd202bb6e1fc" Sep 30 17:29:23 crc kubenswrapper[4818]: I0930 17:29:23.306564 4818 scope.go:117] "RemoveContainer" containerID="230457141ad594e100eab2a73d72a0e9142c7dcc67320de3f47e61b5e280c4a1" Sep 30 17:29:23 crc kubenswrapper[4818]: I0930 17:29:23.337789 4818 scope.go:117] "RemoveContainer" containerID="04a7abfa7afd9d74dc59368a3af64149140e061912f167407d3db7ccde19cff2" Sep 30 17:29:23 crc kubenswrapper[4818]: I0930 17:29:23.377709 4818 scope.go:117] "RemoveContainer" containerID="c5ce4b7a86228d4bafc64dfd04a57d91c359195652a98639dbc63980ee5dad2d" Sep 30 17:29:23 crc kubenswrapper[4818]: I0930 17:29:23.425215 4818 scope.go:117] "RemoveContainer" containerID="45d3f53079ba7eddec2c293003245f40b37f1dfdf8c448ced3612e8ede0010ba" Sep 30 17:29:33 crc kubenswrapper[4818]: I0930 17:29:33.020649 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:29:34 crc kubenswrapper[4818]: I0930 17:29:34.124622 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"30880a11bad1ae43c066fbba66a2570618ec8b81f77a03b0a99408888162dbd7"} Sep 30 17:29:46 crc kubenswrapper[4818]: I0930 17:29:46.409476 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.713833 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bz92g/must-gather-2dm2p"] Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.716128 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.719723 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-bz92g"/"default-dockercfg-nttlw" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.720262 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-bz92g"/"openshift-service-ca.crt" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.721408 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-bz92g"/"kube-root-ca.crt" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.733567 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bz92g/must-gather-2dm2p"] Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.790817 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-must-gather-output\") pod \"must-gather-2dm2p\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.790866 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26hbh\" (UniqueName: \"kubernetes.io/projected/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-kube-api-access-26hbh\") pod \"must-gather-2dm2p\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.892156 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-must-gather-output\") pod \"must-gather-2dm2p\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.892587 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-must-gather-output\") pod \"must-gather-2dm2p\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.892662 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26hbh\" (UniqueName: \"kubernetes.io/projected/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-kube-api-access-26hbh\") pod \"must-gather-2dm2p\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:54 crc kubenswrapper[4818]: I0930 17:29:54.914615 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26hbh\" (UniqueName: \"kubernetes.io/projected/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-kube-api-access-26hbh\") pod \"must-gather-2dm2p\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:55 crc kubenswrapper[4818]: I0930 17:29:55.032273 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:29:55 crc kubenswrapper[4818]: I0930 17:29:55.484401 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bz92g/must-gather-2dm2p"] Sep 30 17:29:55 crc kubenswrapper[4818]: I0930 17:29:55.494372 4818 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 30 17:29:56 crc kubenswrapper[4818]: I0930 17:29:56.317260 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bz92g/must-gather-2dm2p" event={"ID":"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295","Type":"ContainerStarted","Data":"6599c2b52cafa08ba9f972b291529fb1e96880abbf1d266ab03fd31e3fc5fb37"} Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.147963 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts"] Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.150871 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.152975 4818 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.153097 4818 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.155860 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts"] Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.279626 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-secret-volume\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.279713 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-config-volume\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.279740 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdrz9\" (UniqueName: \"kubernetes.io/projected/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-kube-api-access-mdrz9\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.356352 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bz92g/must-gather-2dm2p" event={"ID":"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295","Type":"ContainerStarted","Data":"6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc"} Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.381046 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" 
(UniqueName: \"kubernetes.io/secret/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-secret-volume\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.381103 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-config-volume\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.381127 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdrz9\" (UniqueName: \"kubernetes.io/projected/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-kube-api-access-mdrz9\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.382283 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-config-volume\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.392893 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-secret-volume\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.400029 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdrz9\" (UniqueName: \"kubernetes.io/projected/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-kube-api-access-mdrz9\") pod \"collect-profiles-29320890-gtnts\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: I0930 17:30:00.468602 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:00 crc kubenswrapper[4818]: W0930 17:30:00.993387 4818 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc1d8719_4dda_4a8e_a2ed_e8ffd5477bbc.slice/crio-96eeb1d150526b02f34ba995963cedb0387a9d70389c3868f4d0d1e9db01b9c2 WatchSource:0}: Error finding container 96eeb1d150526b02f34ba995963cedb0387a9d70389c3868f4d0d1e9db01b9c2: Status 404 returned error can't find the container with id 96eeb1d150526b02f34ba995963cedb0387a9d70389c3868f4d0d1e9db01b9c2 Sep 30 17:30:01 crc kubenswrapper[4818]: I0930 17:30:01.000484 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts"] Sep 30 17:30:01 crc kubenswrapper[4818]: I0930 17:30:01.366227 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" event={"ID":"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc","Type":"ContainerStarted","Data":"621581b87023efc9298a8e47bf9e71c2afce5c140914dc91aca21ecba9c804b1"} Sep 30 17:30:01 crc kubenswrapper[4818]: I0930 17:30:01.366287 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" event={"ID":"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc","Type":"ContainerStarted","Data":"96eeb1d150526b02f34ba995963cedb0387a9d70389c3868f4d0d1e9db01b9c2"} Sep 30 17:30:01 crc kubenswrapper[4818]: I0930 17:30:01.368512 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bz92g/must-gather-2dm2p" event={"ID":"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295","Type":"ContainerStarted","Data":"79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2"} Sep 30 17:30:01 crc kubenswrapper[4818]: I0930 17:30:01.390887 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" podStartSLOduration=1.390860538 podStartE2EDuration="1.390860538s" podCreationTimestamp="2025-09-30 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-30 17:30:01.384408973 +0000 UTC m=+1848.138680789" watchObservedRunningTime="2025-09-30 17:30:01.390860538 +0000 UTC m=+1848.145132354" Sep 30 17:30:01 crc kubenswrapper[4818]: I0930 17:30:01.413073 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-bz92g/must-gather-2dm2p" podStartSLOduration=3.086984517 podStartE2EDuration="7.413053268s" podCreationTimestamp="2025-09-30 17:29:54 +0000 UTC" firstStartedPulling="2025-09-30 17:29:55.494071354 +0000 UTC m=+1842.248343170" lastFinishedPulling="2025-09-30 17:29:59.820140115 +0000 UTC m=+1846.574411921" observedRunningTime="2025-09-30 17:30:01.405553325 +0000 UTC m=+1848.159825141" watchObservedRunningTime="2025-09-30 17:30:01.413053268 +0000 UTC m=+1848.167325084" Sep 30 17:30:02 crc kubenswrapper[4818]: I0930 17:30:02.378246 4818 generic.go:334] "Generic (PLEG): container finished" podID="dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc" containerID="621581b87023efc9298a8e47bf9e71c2afce5c140914dc91aca21ecba9c804b1" exitCode=0 Sep 30 17:30:02 crc kubenswrapper[4818]: I0930 17:30:02.379838 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" 
event={"ID":"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc","Type":"ContainerDied","Data":"621581b87023efc9298a8e47bf9e71c2afce5c140914dc91aca21ecba9c804b1"} Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.714312 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.840848 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdrz9\" (UniqueName: \"kubernetes.io/projected/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-kube-api-access-mdrz9\") pod \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.840892 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-config-volume\") pod \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.840942 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-secret-volume\") pod \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\" (UID: \"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc\") " Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.842193 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-config-volume" (OuterVolumeSpecName: "config-volume") pod "dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc" (UID: "dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.847613 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc" (UID: "dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.855221 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-kube-api-access-mdrz9" (OuterVolumeSpecName: "kube-api-access-mdrz9") pod "dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc" (UID: "dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc"). InnerVolumeSpecName "kube-api-access-mdrz9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.943067 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdrz9\" (UniqueName: \"kubernetes.io/projected/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-kube-api-access-mdrz9\") on node \"crc\" DevicePath \"\"" Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.943094 4818 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-config-volume\") on node \"crc\" DevicePath \"\"" Sep 30 17:30:03 crc kubenswrapper[4818]: I0930 17:30:03.943117 4818 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 30 17:30:04 crc kubenswrapper[4818]: I0930 17:30:04.393224 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" event={"ID":"dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc","Type":"ContainerDied","Data":"96eeb1d150526b02f34ba995963cedb0387a9d70389c3868f4d0d1e9db01b9c2"} Sep 30 17:30:04 crc kubenswrapper[4818]: I0930 17:30:04.393262 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96eeb1d150526b02f34ba995963cedb0387a9d70389c3868f4d0d1e9db01b9c2" Sep 30 17:30:04 crc kubenswrapper[4818]: I0930 17:30:04.393315 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29320890-gtnts" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.615069 4818 scope.go:117] "RemoveContainer" containerID="af3a8905b0821470104da568b542524329d233712b2a994da0bb6aed9d9286f1" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.636306 4818 scope.go:117] "RemoveContainer" containerID="24be82590247ba81f18514ac126b9d64df72666b58ca865e1441f1d433c6ff25" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.683809 4818 scope.go:117] "RemoveContainer" containerID="dbc822498410508327f04db177d74625cfcf30eff452704f9a0e6b4c88c0384c" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.715385 4818 scope.go:117] "RemoveContainer" containerID="92ac88fca6a6683e081c4fbbe95b5cbe4bd2df3952ad53486eded918aa01233f" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.762516 4818 scope.go:117] "RemoveContainer" containerID="a7318b88d040aeb2352229ca638e298bbc9636d342d7b51903d9f6da2745c950" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.816631 4818 scope.go:117] "RemoveContainer" containerID="7b08d65606af0ee74b552488b86b4be4afaea03991b7fa0d38a3827863f6c12c" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.855699 4818 scope.go:117] "RemoveContainer" containerID="f950aeccc9546a0bc347aa9b403f38db82e0ea3fb374796d2bc72d9ac4abff6f" Sep 30 17:30:23 crc kubenswrapper[4818]: I0930 17:30:23.896547 4818 scope.go:117] "RemoveContainer" containerID="2b71b8c7c0932ed04e3e19f535583209888aef2dd13c8d3a7d174f4aee786c6e" Sep 30 17:30:59 crc kubenswrapper[4818]: I0930 17:30:59.929173 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt_c184175d-d18e-45cb-a79d-d27dfa315d2b/util/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.088866 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt_c184175d-d18e-45cb-a79d-d27dfa315d2b/pull/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.095299 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt_c184175d-d18e-45cb-a79d-d27dfa315d2b/util/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.113696 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt_c184175d-d18e-45cb-a79d-d27dfa315d2b/pull/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.289639 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt_c184175d-d18e-45cb-a79d-d27dfa315d2b/pull/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.332218 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt_c184175d-d18e-45cb-a79d-d27dfa315d2b/util/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.341437 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3ddef6706cdb0a733046dc68e3ef40061df35e198348c12adc335e4ea7hj9vt_c184175d-d18e-45cb-a79d-d27dfa315d2b/extract/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.608991 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s_f5c9df34-4128-4412-9f8f-cba05d9e7dd1/util/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.855135 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s_f5c9df34-4128-4412-9f8f-cba05d9e7dd1/pull/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.858505 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s_f5c9df34-4128-4412-9f8f-cba05d9e7dd1/pull/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.893484 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s_f5c9df34-4128-4412-9f8f-cba05d9e7dd1/util/0.log" Sep 30 17:31:00 crc kubenswrapper[4818]: I0930 17:31:00.999487 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s_f5c9df34-4128-4412-9f8f-cba05d9e7dd1/util/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.027276 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s_f5c9df34-4128-4412-9f8f-cba05d9e7dd1/pull/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.082548 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b8bf0308b7debc935a539c9820c12ccc3df53b6b866edc81f4649beb59k279s_f5c9df34-4128-4412-9f8f-cba05d9e7dd1/extract/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.195121 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6ff8b75857-q4g6w_38a968a2-8f4f-4389-8ee1-852f92ffcb4b/kube-rbac-proxy/0.log" Sep 30 
17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.197308 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6ff8b75857-q4g6w_38a968a2-8f4f-4389-8ee1-852f92ffcb4b/manager/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.325550 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-644bddb6d8-45xpz_04b289df-a81e-43b7-8aa1-66c50deeccf6/kube-rbac-proxy/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.424937 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-644bddb6d8-45xpz_04b289df-a81e-43b7-8aa1-66c50deeccf6/manager/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.487480 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-84f4f7b77b-tkdbz_7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b/kube-rbac-proxy/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.548691 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-84f4f7b77b-tkdbz_7e2b35e1-f9e8-4ba0-88a5-9cbe8434942b/manager/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.615532 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84958c4d49-r6vlr_ef92660c-59b4-4bf2-ae84-1873db0c94b2/kube-rbac-proxy/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.668686 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84958c4d49-r6vlr_ef92660c-59b4-4bf2-ae84-1873db0c94b2/manager/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.825609 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5d889d78cf-hgrdk_e8be5e01-df2e-4b18-a8bd-9b48b962f487/manager/0.log" Sep 30 17:31:01 crc kubenswrapper[4818]: I0930 17:31:01.876230 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5d889d78cf-hgrdk_e8be5e01-df2e-4b18-a8bd-9b48b962f487/kube-rbac-proxy/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.031380 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-9f4696d94-8jqzd_91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8/manager/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.038317 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-9f4696d94-8jqzd_91d19c88-bc5e-4741-bae8-ac4cfcf5a3b8/kube-rbac-proxy/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.115898 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7d857cc749-d4c9x_8682f68d-8f1a-40c2-a06f-412cf86e26db/kube-rbac-proxy/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.284774 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7d857cc749-d4c9x_8682f68d-8f1a-40c2-a06f-412cf86e26db/manager/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.308607 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-7975b88857-6gvjh_609a391b-24d6-41f8-ad04-2c0a6e35de6b/kube-rbac-proxy/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.345825 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-7975b88857-6gvjh_609a391b-24d6-41f8-ad04-2c0a6e35de6b/manager/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.523946 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5bd55b4bff-bk24d_d4069512-9cf0-4fd3-839a-4afc857dec61/kube-rbac-proxy/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.656478 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5bd55b4bff-bk24d_d4069512-9cf0-4fd3-839a-4afc857dec61/manager/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.776337 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6d68dbc695-shjzt_ea4afb9c-f223-4571-8d05-a4ed581c8116/manager/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.783662 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6d68dbc695-shjzt_ea4afb9c-f223-4571-8d05-a4ed581c8116/kube-rbac-proxy/0.log" Sep 30 17:31:02 crc kubenswrapper[4818]: I0930 17:31:02.885240 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-88c7-2wcsq_3707a523-c522-4172-9844-75e296641307/kube-rbac-proxy/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.042289 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-88c7-2wcsq_3707a523-c522-4172-9844-75e296641307/manager/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.088669 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-64d7b59854-v6kc9_bd72fadd-ea9a-43ab-9817-83b6a33b60fb/manager/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.120532 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-64d7b59854-v6kc9_bd72fadd-ea9a-43ab-9817-83b6a33b60fb/kube-rbac-proxy/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.235468 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-c7c776c96-7kj24_211a9bb6-3ab5-47e6-92e4-32eb396dd4dc/kube-rbac-proxy/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.270773 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-c7c776c96-7kj24_211a9bb6-3ab5-47e6-92e4-32eb396dd4dc/manager/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.406763 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-76fcc6dc7c-lnnkn_ec885f64-c0b7-4541-826e-2405c7a8c4e6/manager/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.448442 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-76fcc6dc7c-lnnkn_ec885f64-c0b7-4541-826e-2405c7a8c4e6/kube-rbac-proxy/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.494723 4818 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6d776955-2x6qj_d11ea82b-7b70-49df-a288-673cb6ee9e9a/kube-rbac-proxy/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.506173 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6d776955-2x6qj_d11ea82b-7b70-49df-a288-673cb6ee9e9a/manager/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.665764 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-b7d9776bd-qhghl_884fad81-43e8-4b9c-b517-83d24d16f9cd/kube-rbac-proxy/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.729724 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-cwj8d_99558b8b-4cbe-4868-aca6-90e66d06160f/registry-server/0.log" Sep 30 17:31:03 crc kubenswrapper[4818]: I0930 17:31:03.920189 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-9976ff44c-wn6hs_92b0d181-cc90-43f8-a3a4-86a9a65b4c73/kube-rbac-proxy/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.003373 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-b7d9776bd-qhghl_884fad81-43e8-4b9c-b517-83d24d16f9cd/manager/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.095517 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-9976ff44c-wn6hs_92b0d181-cc90-43f8-a3a4-86a9a65b4c73/manager/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.218657 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-589c58c6c-52hvk_4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee/kube-rbac-proxy/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.270614 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-589c58c6c-52hvk_4b6ffd9b-6e0e-44f3-bce2-19c1fdf8d0ee/manager/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.344636 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-79d8469568-dq42t_347237b8-ee53-432a-932b-d7e2488b253f/operator/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.435955 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bc7dc7bd9-jl7hb_02f33697-4faf-4e8d-8d78-77a6e8ad7d72/manager/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.439852 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bc7dc7bd9-jl7hb_02f33697-4faf-4e8d-8d78-77a6e8ad7d72/kube-rbac-proxy/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.565894 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-b8d54b5d7-vn27d_ac78c97f-d06d-4817-aba8-145f7ec5c3ee/kube-rbac-proxy/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.691629 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-f66b554c6-lcj9j_43743bf4-0c17-4266-b31d-a17cf0e6d330/kube-rbac-proxy/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.851108 4818 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-b8d54b5d7-vn27d_ac78c97f-d06d-4817-aba8-145f7ec5c3ee/manager/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.870830 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-f66b554c6-lcj9j_43743bf4-0c17-4266-b31d-a17cf0e6d330/manager/0.log" Sep 30 17:31:04 crc kubenswrapper[4818]: I0930 17:31:04.972897 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-54745555-zlxdf_7373131a-0a63-4ecd-a6ff-450382e6011b/kube-rbac-proxy/0.log" Sep 30 17:31:05 crc kubenswrapper[4818]: I0930 17:31:05.131313 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-54745555-zlxdf_7373131a-0a63-4ecd-a6ff-450382e6011b/manager/0.log" Sep 30 17:31:05 crc kubenswrapper[4818]: I0930 17:31:05.149447 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-index-cb8dm_12b6c7ef-2f34-4885-9ed0-0522c8057303/registry-server/0.log" Sep 30 17:31:22 crc kubenswrapper[4818]: I0930 17:31:22.848316 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-xq6hp_317be33e-ccbd-44f6-aafa-f77a1f2ba6eb/control-plane-machine-set-operator/0.log" Sep 30 17:31:22 crc kubenswrapper[4818]: I0930 17:31:22.938883 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-7rnqd_4bb9049c-5f9a-4260-bd40-3140669d6701/machine-api-operator/0.log" Sep 30 17:31:22 crc kubenswrapper[4818]: I0930 17:31:22.999645 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-7rnqd_4bb9049c-5f9a-4260-bd40-3140669d6701/kube-rbac-proxy/0.log" Sep 30 17:31:24 crc kubenswrapper[4818]: I0930 17:31:24.023393 4818 scope.go:117] "RemoveContainer" containerID="c38d84203642ce7e577b1c2f01c9149000e12173cbdcc51807297f44c17d74f6" Sep 30 17:31:24 crc kubenswrapper[4818]: I0930 17:31:24.087575 4818 scope.go:117] "RemoveContainer" containerID="1874469c28fd86902b9653580085c4953c8e52473fec3f05bfebbcc28b9c2103" Sep 30 17:31:24 crc kubenswrapper[4818]: I0930 17:31:24.106675 4818 scope.go:117] "RemoveContainer" containerID="03712bb8cdcdc7a35c6a20dc3c6225800a9bbd4a22a43c01f7e5a377042bdf39" Sep 30 17:31:36 crc kubenswrapper[4818]: I0930 17:31:36.464347 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-7d4cc89fcb-lmzph_75887178-d0ff-43b3-9fd3-e8674c7a5082/cert-manager-controller/0.log" Sep 30 17:31:36 crc kubenswrapper[4818]: I0930 17:31:36.614483 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7d9f95dbf-nhcf7_06937cd7-9130-454f-9a19-afa84e75da83/cert-manager-cainjector/0.log" Sep 30 17:31:36 crc kubenswrapper[4818]: I0930 17:31:36.750496 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-d969966f-8l42t_0ed641ab-239c-4e02-a872-800d04cb8655/cert-manager-webhook/0.log" Sep 30 17:31:48 crc kubenswrapper[4818]: I0930 17:31:48.045104 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-2jn4f"] Sep 30 17:31:48 crc kubenswrapper[4818]: I0930 17:31:48.050518 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["watcher-kuttl-default/keystone-bootstrap-2jn4f"] Sep 30 17:31:50 crc kubenswrapper[4818]: I0930 17:31:50.032084 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfb209b6-ed72-4796-92f5-85372aeaf10c" path="/var/lib/kubelet/pods/bfb209b6-ed72-4796-92f5-85372aeaf10c/volumes" Sep 30 17:31:50 crc kubenswrapper[4818]: I0930 17:31:50.459340 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-864bb6dfb5-sdlfz_61d0be16-a287-4fcc-ba56-4ba51fa86b60/nmstate-console-plugin/0.log" Sep 30 17:31:50 crc kubenswrapper[4818]: I0930 17:31:50.682972 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-fdv7d_217fe5c6-1bd5-4a1b-b936-cc80ba5ce0b5/nmstate-handler/0.log" Sep 30 17:31:50 crc kubenswrapper[4818]: I0930 17:31:50.730683 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58fcddf996-zhjhv_b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97/kube-rbac-proxy/0.log" Sep 30 17:31:50 crc kubenswrapper[4818]: I0930 17:31:50.793326 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58fcddf996-zhjhv_b5de4ad7-8ab5-4b48-8c9f-504eaea0fd97/nmstate-metrics/0.log" Sep 30 17:31:51 crc kubenswrapper[4818]: I0930 17:31:51.021446 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6d689559c5-6ndvg_edeb5db5-c3ef-4f9a-ba10-94b51f80d98c/nmstate-webhook/0.log" Sep 30 17:31:51 crc kubenswrapper[4818]: I0930 17:31:51.047950 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5d6f6cfd66-qkrhb_53a9a60d-25e2-4794-adb3-83cd8c2df8b5/nmstate-operator/0.log" Sep 30 17:31:52 crc kubenswrapper[4818]: I0930 17:31:52.595552 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:31:52 crc kubenswrapper[4818]: I0930 17:31:52.595940 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.097327 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5d688f5ffc-46jj6_7e3434fb-c492-4b7d-8d60-42e4bd658f43/kube-rbac-proxy/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.334182 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5d688f5ffc-46jj6_7e3434fb-c492-4b7d-8d60-42e4bd658f43/controller/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.390810 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-frr-files/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.610459 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-reloader/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.620659 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-reloader/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.645708 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-frr-files/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.667505 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-metrics/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.853317 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-metrics/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.853682 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-frr-files/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.871303 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-reloader/0.log" Sep 30 17:32:06 crc kubenswrapper[4818]: I0930 17:32:06.894411 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-metrics/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.111008 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-reloader/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.119762 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/controller/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.124090 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-metrics/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.125963 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/cp-frr-files/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.295497 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/frr-metrics/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.320169 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/kube-rbac-proxy/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.340179 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/kube-rbac-proxy-frr/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.531224 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/reloader/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.547300 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-5478bdb765-kmldl_23b571c9-e316-4943-b334-505074c4a50e/frr-k8s-webhook-server/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.741722 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6f59b986b5-d4h6m_a43b85d0-34d6-49ad-9fc0-7580b2f2ef36/manager/0.log" Sep 30 17:32:07 crc kubenswrapper[4818]: I0930 17:32:07.973564 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-795889b56f-gt7lt_f1beebb1-d722-453d-ab32-f986bfd746df/webhook-server/0.log" Sep 30 17:32:08 crc kubenswrapper[4818]: I0930 17:32:08.036390 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-s4j9s_26bab40e-d8f8-478e-aafc-bbd2f7368f72/kube-rbac-proxy/0.log" Sep 30 17:32:08 crc kubenswrapper[4818]: I0930 17:32:08.327102 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dwbnt_82c7a122-7887-4d75-a960-c8aa40a748f4/frr/0.log" Sep 30 17:32:08 crc kubenswrapper[4818]: I0930 17:32:08.452426 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-s4j9s_26bab40e-d8f8-478e-aafc-bbd2f7368f72/speaker/0.log" Sep 30 17:32:22 crc kubenswrapper[4818]: I0930 17:32:22.595910 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:32:22 crc kubenswrapper[4818]: I0930 17:32:22.596915 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:32:24 crc kubenswrapper[4818]: I0930 17:32:24.185727 4818 scope.go:117] "RemoveContainer" containerID="89fb1461ee943e21ce0cae0da63e60206b0104c82c883623f9bc4d06ae4d41d5" Sep 30 17:32:24 crc kubenswrapper[4818]: I0930 17:32:24.213265 4818 scope.go:117] "RemoveContainer" containerID="ace498affd6e119be4a76f4999f60a2b1e10e048bd4cb979ac71ae983d510a7e" Sep 30 17:32:24 crc kubenswrapper[4818]: I0930 17:32:24.237812 4818 scope.go:117] "RemoveContainer" containerID="748ed8ecf4f69407685aa8d303a631a86821253048aa777f156ab78fb16920e1" Sep 30 17:32:24 crc kubenswrapper[4818]: I0930 17:32:24.278678 4818 scope.go:117] "RemoveContainer" containerID="bcfea6f115fd374626dc950fd64bd9690dec691f687347dd78a97dbb3a5503a6" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.110307 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_alertmanager-metric-storage-0_81366e0f-12de-49bd-8834-68b2d0da319b/init-config-reloader/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.296296 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_alertmanager-metric-storage-0_81366e0f-12de-49bd-8834-68b2d0da319b/init-config-reloader/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.330322 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_alertmanager-metric-storage-0_81366e0f-12de-49bd-8834-68b2d0da319b/alertmanager/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.389324 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_alertmanager-metric-storage-0_81366e0f-12de-49bd-8834-68b2d0da319b/config-reloader/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.527027 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/watcher-kuttl-default_ceilometer-0_907c0b05-3239-4941-b998-a132f9a12339/ceilometer-central-agent/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.590108 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_ceilometer-0_907c0b05-3239-4941-b998-a132f9a12339/ceilometer-notification-agent/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.612362 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_ceilometer-0_907c0b05-3239-4941-b998-a132f9a12339/proxy-httpd/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.686388 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_ceilometer-0_907c0b05-3239-4941-b998-a132f9a12339/sg-core/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.879829 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_keystone-55f7bfb7c9-pcc8n_32034362-4dd5-4231-b991-837462326e1a/keystone-api/0.log" Sep 30 17:32:31 crc kubenswrapper[4818]: I0930 17:32:31.934060 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_kube-state-metrics-0_b0f696f5-cad9-4a6b-8f09-2a7f6db599b0/kube-state-metrics/0.log" Sep 30 17:32:32 crc kubenswrapper[4818]: I0930 17:32:32.207540 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_openstack-galera-0_7a7b6023-3d68-4aa9-a911-59017220edbf/mysql-bootstrap/0.log" Sep 30 17:32:32 crc kubenswrapper[4818]: I0930 17:32:32.426423 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_openstack-galera-0_7a7b6023-3d68-4aa9-a911-59017220edbf/mysql-bootstrap/0.log" Sep 30 17:32:32 crc kubenswrapper[4818]: I0930 17:32:32.435283 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_openstack-galera-0_7a7b6023-3d68-4aa9-a911-59017220edbf/galera/0.log" Sep 30 17:32:32 crc kubenswrapper[4818]: I0930 17:32:32.639627 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_openstackclient_9470aa86-6546-4a31-a0d5-6377490de3b1/openstackclient/0.log" Sep 30 17:32:32 crc kubenswrapper[4818]: I0930 17:32:32.740332 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_d0294316-c783-4cab-98e5-9435e52c6979/init-config-reloader/0.log" Sep 30 17:32:32 crc kubenswrapper[4818]: I0930 17:32:32.946566 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_d0294316-c783-4cab-98e5-9435e52c6979/config-reloader/0.log" Sep 30 17:32:32 crc kubenswrapper[4818]: I0930 17:32:32.967456 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_d0294316-c783-4cab-98e5-9435e52c6979/prometheus/0.log" Sep 30 17:32:33 crc kubenswrapper[4818]: I0930 17:32:33.010816 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_d0294316-c783-4cab-98e5-9435e52c6979/init-config-reloader/0.log" Sep 30 17:32:33 crc kubenswrapper[4818]: I0930 17:32:33.179312 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_d0294316-c783-4cab-98e5-9435e52c6979/thanos-sidecar/0.log" Sep 30 17:32:33 crc kubenswrapper[4818]: I0930 17:32:33.278186 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/watcher-kuttl-default_rabbitmq-notifications-server-0_68a4cecf-f627-497d-a682-5092ea0b3298/setup-container/0.log" Sep 30 17:32:33 crc kubenswrapper[4818]: I0930 17:32:33.452381 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-notifications-server-0_68a4cecf-f627-497d-a682-5092ea0b3298/setup-container/0.log" Sep 30 17:32:33 crc kubenswrapper[4818]: I0930 17:32:33.545673 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-notifications-server-0_68a4cecf-f627-497d-a682-5092ea0b3298/rabbitmq/0.log" Sep 30 17:32:33 crc kubenswrapper[4818]: I0930 17:32:33.889881 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-server-0_f707c20f-09e2-4aa7-9a18-5b37f2050e45/setup-container/0.log" Sep 30 17:32:34 crc kubenswrapper[4818]: I0930 17:32:34.035603 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-server-0_f707c20f-09e2-4aa7-9a18-5b37f2050e45/setup-container/0.log" Sep 30 17:32:34 crc kubenswrapper[4818]: I0930 17:32:34.118702 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-server-0_f707c20f-09e2-4aa7-9a18-5b37f2050e45/rabbitmq/0.log" Sep 30 17:32:43 crc kubenswrapper[4818]: I0930 17:32:43.197060 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_memcached-0_44e411aa-6171-4d94-8791-05a653dee924/memcached/0.log" Sep 30 17:32:50 crc kubenswrapper[4818]: I0930 17:32:50.848776 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6_2b142dea-f8b9-4930-b066-64dd84db0dd5/util/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.050938 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6_2b142dea-f8b9-4930-b066-64dd84db0dd5/util/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.084999 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6_2b142dea-f8b9-4930-b066-64dd84db0dd5/pull/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.152167 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6_2b142dea-f8b9-4930-b066-64dd84db0dd5/pull/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.303733 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6_2b142dea-f8b9-4930-b066-64dd84db0dd5/util/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.319719 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6_2b142dea-f8b9-4930-b066-64dd84db0dd5/extract/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.325497 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb692d4x6_2b142dea-f8b9-4930-b066-64dd84db0dd5/pull/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.508442 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql_f8d58532-c164-4d67-be63-2324034f1706/util/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.633390 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql_f8d58532-c164-4d67-be63-2324034f1706/util/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.649851 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql_f8d58532-c164-4d67-be63-2324034f1706/pull/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.689266 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql_f8d58532-c164-4d67-be63-2324034f1706/pull/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.852591 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql_f8d58532-c164-4d67-be63-2324034f1706/pull/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.857239 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql_f8d58532-c164-4d67-be63-2324034f1706/extract/0.log" Sep 30 17:32:51 crc kubenswrapper[4818]: I0930 17:32:51.859813 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcxghql_f8d58532-c164-4d67-be63-2324034f1706/util/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.172818 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm_9088c828-53ab-421b-8509-c350596da888/util/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.326168 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm_9088c828-53ab-421b-8509-c350596da888/pull/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.340393 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm_9088c828-53ab-421b-8509-c350596da888/util/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.340551 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm_9088c828-53ab-421b-8509-c350596da888/pull/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.498870 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm_9088c828-53ab-421b-8509-c350596da888/extract/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.535615 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm_9088c828-53ab-421b-8509-c350596da888/pull/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.537129 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d9nkcm_9088c828-53ab-421b-8509-c350596da888/util/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.595458 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.595534 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.595590 4818 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.596462 4818 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"30880a11bad1ae43c066fbba66a2570618ec8b81f77a03b0a99408888162dbd7"} pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.596568 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" containerID="cri-o://30880a11bad1ae43c066fbba66a2570618ec8b81f77a03b0a99408888162dbd7" gracePeriod=600 Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.695243 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5sl7h_8b734a68-025d-47ea-99d0-bf680e9e54cd/extract-utilities/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.876809 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5sl7h_8b734a68-025d-47ea-99d0-bf680e9e54cd/extract-content/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.895053 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5sl7h_8b734a68-025d-47ea-99d0-bf680e9e54cd/extract-utilities/0.log" Sep 30 17:32:52 crc kubenswrapper[4818]: I0930 17:32:52.915641 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5sl7h_8b734a68-025d-47ea-99d0-bf680e9e54cd/extract-content/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.073787 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5sl7h_8b734a68-025d-47ea-99d0-bf680e9e54cd/extract-utilities/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.099434 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5sl7h_8b734a68-025d-47ea-99d0-bf680e9e54cd/extract-content/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.365877 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-rdxdl_838572b4-547f-482a-8a5f-deb28aa2e587/extract-utilities/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.523773 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5sl7h_8b734a68-025d-47ea-99d0-bf680e9e54cd/registry-server/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.540806 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rdxdl_838572b4-547f-482a-8a5f-deb28aa2e587/extract-utilities/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.567014 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rdxdl_838572b4-547f-482a-8a5f-deb28aa2e587/extract-content/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.567141 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rdxdl_838572b4-547f-482a-8a5f-deb28aa2e587/extract-content/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.723888 4818 generic.go:334] "Generic (PLEG): container finished" podID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerID="30880a11bad1ae43c066fbba66a2570618ec8b81f77a03b0a99408888162dbd7" exitCode=0 Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.723955 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerDied","Data":"30880a11bad1ae43c066fbba66a2570618ec8b81f77a03b0a99408888162dbd7"} Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.723985 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" event={"ID":"5e908152-dcb2-4b41-974d-26b03ae0254b","Type":"ContainerStarted","Data":"138389ea6f2b538591ad369e6c851dff19edb6eed76d3c022b5beb712f29f77d"} Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.724007 4818 scope.go:117] "RemoveContainer" containerID="91f7e8ec93c45a67b99cdfcc81be1bb6dc0fa263134b36ca29bd9322e18f9233" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.783944 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rdxdl_838572b4-547f-482a-8a5f-deb28aa2e587/extract-utilities/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.798433 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rdxdl_838572b4-547f-482a-8a5f-deb28aa2e587/extract-content/0.log" Sep 30 17:32:53 crc kubenswrapper[4818]: I0930 17:32:53.983151 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd_e15bd4a3-2980-4e48-b222-988af1b45bb4/util/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.181111 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rdxdl_838572b4-547f-482a-8a5f-deb28aa2e587/registry-server/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.224465 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd_e15bd4a3-2980-4e48-b222-988af1b45bb4/util/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.278481 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd_e15bd4a3-2980-4e48-b222-988af1b45bb4/pull/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.284085 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd_e15bd4a3-2980-4e48-b222-988af1b45bb4/pull/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.467160 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd_e15bd4a3-2980-4e48-b222-988af1b45bb4/util/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.501407 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-dhz7n_9ca14f10-19ae-485b-b237-7a3e0c1c701a/marketplace-operator/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.522450 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd_e15bd4a3-2980-4e48-b222-988af1b45bb4/extract/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.532796 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96ks9wd_e15bd4a3-2980-4e48-b222-988af1b45bb4/pull/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.670192 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fmv9v_60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c/extract-utilities/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.808146 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fmv9v_60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c/extract-utilities/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.823599 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fmv9v_60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c/extract-content/0.log" Sep 30 17:32:54 crc kubenswrapper[4818]: I0930 17:32:54.849156 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fmv9v_60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c/extract-content/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.027737 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fmv9v_60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c/extract-utilities/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.048501 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-skzzr_9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9/extract-utilities/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.074490 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fmv9v_60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c/extract-content/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.130311 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fmv9v_60d6fa53-a6a2-4a62-97a6-5cbe3c1b222c/registry-server/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.297038 4818 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-skzzr_9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9/extract-content/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.297350 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-skzzr_9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9/extract-utilities/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.305355 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-skzzr_9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9/extract-content/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.484734 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-skzzr_9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9/extract-utilities/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.515555 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-skzzr_9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9/extract-content/0.log" Sep 30 17:32:55 crc kubenswrapper[4818]: I0930 17:32:55.935677 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-skzzr_9ae4ca0e-f830-45ee-bdfb-7e97f5bf28c9/registry-server/0.log" Sep 30 17:33:08 crc kubenswrapper[4818]: I0930 17:33:08.402052 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-7c8cf85677-bzv8d_96170b1f-1f7b-45df-a1e4-5d9901097907/prometheus-operator/0.log" Sep 30 17:33:08 crc kubenswrapper[4818]: I0930 17:33:08.500350 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-69977c4b67-dk5pg_118fa0c6-c8bf-4ae6-9867-9aaf6ee11824/prometheus-operator-admission-webhook/0.log" Sep 30 17:33:08 crc kubenswrapper[4818]: I0930 17:33:08.563236 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-69977c4b67-vpdnd_73811ba7-972e-40ea-a82d-40a5e56341c4/prometheus-operator-admission-webhook/0.log" Sep 30 17:33:08 crc kubenswrapper[4818]: I0930 17:33:08.837137 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-cc5f78dfc-8zx45_de961e2b-b16d-4db1-b908-5be30a74be3d/operator/0.log" Sep 30 17:33:08 crc kubenswrapper[4818]: I0930 17:33:08.932583 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-6584dc9448-hfpnh_576f7b1f-338a-4be8-a516-10ef07224f16/observability-ui-dashboards/0.log" Sep 30 17:33:09 crc kubenswrapper[4818]: I0930 17:33:09.029835 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-54bc95c9fb-qnpbg_88a12b8d-6531-406b-becd-d70ba32fa6c1/perses-operator/0.log" Sep 30 17:33:24 crc kubenswrapper[4818]: I0930 17:33:24.432424 4818 scope.go:117] "RemoveContainer" containerID="f9ba60ee08f4f5c5b0c3e91328071b34af72409c0c8b6d65deb4688c4e2c43c2" Sep 30 17:33:24 crc kubenswrapper[4818]: I0930 17:33:24.468436 4818 scope.go:117] "RemoveContainer" containerID="eff88837534f86422a3581d1f5d7096f69fc81784a5310142265a94e5d9ad6cf" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.147153 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8tt4f"] Sep 30 17:34:00 crc kubenswrapper[4818]: E0930 17:34:00.147967 4818 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc" containerName="collect-profiles" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.147980 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc" containerName="collect-profiles" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.148132 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc1d8719-4dda-4a8e-a2ed-e8ffd5477bbc" containerName="collect-profiles" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.149195 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.167750 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8tt4f"] Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.309256 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-utilities\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.309310 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bd6b\" (UniqueName: \"kubernetes.io/projected/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-kube-api-access-8bd6b\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.309513 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-catalog-content\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.411161 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-utilities\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.411214 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bd6b\" (UniqueName: \"kubernetes.io/projected/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-kube-api-access-8bd6b\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.411251 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-catalog-content\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.411650 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-utilities\") pod \"community-operators-8tt4f\" 
(UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.411836 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-catalog-content\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.435816 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bd6b\" (UniqueName: \"kubernetes.io/projected/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-kube-api-access-8bd6b\") pod \"community-operators-8tt4f\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:00 crc kubenswrapper[4818]: I0930 17:34:00.471999 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:01 crc kubenswrapper[4818]: I0930 17:34:01.017422 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8tt4f"] Sep 30 17:34:01 crc kubenswrapper[4818]: I0930 17:34:01.386433 4818 generic.go:334] "Generic (PLEG): container finished" podID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerID="81bcd4ac3e5ab2c22a272ee4ad743a0e292a8bf9642e05509c98674a28da2f0e" exitCode=0 Sep 30 17:34:01 crc kubenswrapper[4818]: I0930 17:34:01.386528 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8tt4f" event={"ID":"e5ae4d8c-0c5e-408f-884b-91c80e0ef861","Type":"ContainerDied","Data":"81bcd4ac3e5ab2c22a272ee4ad743a0e292a8bf9642e05509c98674a28da2f0e"} Sep 30 17:34:01 crc kubenswrapper[4818]: I0930 17:34:01.386684 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8tt4f" event={"ID":"e5ae4d8c-0c5e-408f-884b-91c80e0ef861","Type":"ContainerStarted","Data":"6b27bc6243c69a6fa7edf0d3597dde776e9649dc2f530f86db0f5caa2f25f094"} Sep 30 17:34:02 crc kubenswrapper[4818]: I0930 17:34:02.400279 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8tt4f" event={"ID":"e5ae4d8c-0c5e-408f-884b-91c80e0ef861","Type":"ContainerStarted","Data":"8add5270d42bf1b8365ee8fb2422061f019cf6299e0624d89e7409ae6c5dad3e"} Sep 30 17:34:02 crc kubenswrapper[4818]: E0930 17:34:02.806656 4818 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5ae4d8c_0c5e_408f_884b_91c80e0ef861.slice/crio-8add5270d42bf1b8365ee8fb2422061f019cf6299e0624d89e7409ae6c5dad3e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5ae4d8c_0c5e_408f_884b_91c80e0ef861.slice/crio-conmon-8add5270d42bf1b8365ee8fb2422061f019cf6299e0624d89e7409ae6c5dad3e.scope\": RecentStats: unable to find data in memory cache]" Sep 30 17:34:03 crc kubenswrapper[4818]: I0930 17:34:03.414739 4818 generic.go:334] "Generic (PLEG): container finished" podID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerID="8add5270d42bf1b8365ee8fb2422061f019cf6299e0624d89e7409ae6c5dad3e" exitCode=0 Sep 30 17:34:03 crc kubenswrapper[4818]: I0930 17:34:03.414793 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-8tt4f" event={"ID":"e5ae4d8c-0c5e-408f-884b-91c80e0ef861","Type":"ContainerDied","Data":"8add5270d42bf1b8365ee8fb2422061f019cf6299e0624d89e7409ae6c5dad3e"} Sep 30 17:34:04 crc kubenswrapper[4818]: I0930 17:34:04.424718 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8tt4f" event={"ID":"e5ae4d8c-0c5e-408f-884b-91c80e0ef861","Type":"ContainerStarted","Data":"e3994cf975a31cf65a14989628f681068ae3081f626834feb9880d86784ab4ca"} Sep 30 17:34:04 crc kubenswrapper[4818]: I0930 17:34:04.451635 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8tt4f" podStartSLOduration=1.954147946 podStartE2EDuration="4.451615189s" podCreationTimestamp="2025-09-30 17:34:00 +0000 UTC" firstStartedPulling="2025-09-30 17:34:01.388919317 +0000 UTC m=+2088.143191143" lastFinishedPulling="2025-09-30 17:34:03.88638655 +0000 UTC m=+2090.640658386" observedRunningTime="2025-09-30 17:34:04.446689916 +0000 UTC m=+2091.200961732" watchObservedRunningTime="2025-09-30 17:34:04.451615189 +0000 UTC m=+2091.205887015" Sep 30 17:34:08 crc kubenswrapper[4818]: I0930 17:34:08.466193 4818 generic.go:334] "Generic (PLEG): container finished" podID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerID="6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc" exitCode=0 Sep 30 17:34:08 crc kubenswrapper[4818]: I0930 17:34:08.466305 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bz92g/must-gather-2dm2p" event={"ID":"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295","Type":"ContainerDied","Data":"6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc"} Sep 30 17:34:08 crc kubenswrapper[4818]: I0930 17:34:08.467789 4818 scope.go:117] "RemoveContainer" containerID="6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc" Sep 30 17:34:09 crc kubenswrapper[4818]: I0930 17:34:09.259514 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bz92g_must-gather-2dm2p_9c5f3844-a335-4cb1-b9d6-f22e0ee9d295/gather/0.log" Sep 30 17:34:10 crc kubenswrapper[4818]: I0930 17:34:10.472689 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:10 crc kubenswrapper[4818]: I0930 17:34:10.472738 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:10 crc kubenswrapper[4818]: I0930 17:34:10.580805 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:10 crc kubenswrapper[4818]: I0930 17:34:10.652124 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.132803 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8tt4f"] Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.133789 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8tt4f" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="registry-server" containerID="cri-o://e3994cf975a31cf65a14989628f681068ae3081f626834feb9880d86784ab4ca" gracePeriod=2 Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.549203 4818 generic.go:334] "Generic (PLEG): container 
finished" podID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerID="e3994cf975a31cf65a14989628f681068ae3081f626834feb9880d86784ab4ca" exitCode=0 Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.549246 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8tt4f" event={"ID":"e5ae4d8c-0c5e-408f-884b-91c80e0ef861","Type":"ContainerDied","Data":"e3994cf975a31cf65a14989628f681068ae3081f626834feb9880d86784ab4ca"} Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.549269 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8tt4f" event={"ID":"e5ae4d8c-0c5e-408f-884b-91c80e0ef861","Type":"ContainerDied","Data":"6b27bc6243c69a6fa7edf0d3597dde776e9649dc2f530f86db0f5caa2f25f094"} Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.549279 4818 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b27bc6243c69a6fa7edf0d3597dde776e9649dc2f530f86db0f5caa2f25f094" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.564424 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.671098 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-catalog-content\") pod \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.671147 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-utilities\") pod \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.671268 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bd6b\" (UniqueName: \"kubernetes.io/projected/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-kube-api-access-8bd6b\") pod \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\" (UID: \"e5ae4d8c-0c5e-408f-884b-91c80e0ef861\") " Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.673302 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-utilities" (OuterVolumeSpecName: "utilities") pod "e5ae4d8c-0c5e-408f-884b-91c80e0ef861" (UID: "e5ae4d8c-0c5e-408f-884b-91c80e0ef861"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.697141 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-kube-api-access-8bd6b" (OuterVolumeSpecName: "kube-api-access-8bd6b") pod "e5ae4d8c-0c5e-408f-884b-91c80e0ef861" (UID: "e5ae4d8c-0c5e-408f-884b-91c80e0ef861"). InnerVolumeSpecName "kube-api-access-8bd6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.745507 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5ae4d8c-0c5e-408f-884b-91c80e0ef861" (UID: "e5ae4d8c-0c5e-408f-884b-91c80e0ef861"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.772805 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bd6b\" (UniqueName: \"kubernetes.io/projected/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-kube-api-access-8bd6b\") on node \"crc\" DevicePath \"\"" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.772844 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:34:14 crc kubenswrapper[4818]: I0930 17:34:14.772853 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ae4d8c-0c5e-408f-884b-91c80e0ef861-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:34:15 crc kubenswrapper[4818]: I0930 17:34:15.557661 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8tt4f" Sep 30 17:34:15 crc kubenswrapper[4818]: I0930 17:34:15.605815 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8tt4f"] Sep 30 17:34:15 crc kubenswrapper[4818]: I0930 17:34:15.613612 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8tt4f"] Sep 30 17:34:16 crc kubenswrapper[4818]: I0930 17:34:16.033540 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" path="/var/lib/kubelet/pods/e5ae4d8c-0c5e-408f-884b-91c80e0ef861/volumes" Sep 30 17:34:16 crc kubenswrapper[4818]: I0930 17:34:16.687093 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bz92g/must-gather-2dm2p"] Sep 30 17:34:16 crc kubenswrapper[4818]: I0930 17:34:16.687789 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-bz92g/must-gather-2dm2p" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerName="copy" containerID="cri-o://79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2" gracePeriod=2 Sep 30 17:34:16 crc kubenswrapper[4818]: I0930 17:34:16.694407 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bz92g/must-gather-2dm2p"] Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.153327 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bz92g_must-gather-2dm2p_9c5f3844-a335-4cb1-b9d6-f22e0ee9d295/copy/0.log" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.153715 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.214514 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-must-gather-output\") pod \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.214628 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26hbh\" (UniqueName: \"kubernetes.io/projected/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-kube-api-access-26hbh\") pod \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\" (UID: \"9c5f3844-a335-4cb1-b9d6-f22e0ee9d295\") " Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.221045 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-kube-api-access-26hbh" (OuterVolumeSpecName: "kube-api-access-26hbh") pod "9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" (UID: "9c5f3844-a335-4cb1-b9d6-f22e0ee9d295"). InnerVolumeSpecName "kube-api-access-26hbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.315796 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" (UID: "9c5f3844-a335-4cb1-b9d6-f22e0ee9d295"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.316700 4818 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-must-gather-output\") on node \"crc\" DevicePath \"\"" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.316725 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26hbh\" (UniqueName: \"kubernetes.io/projected/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295-kube-api-access-26hbh\") on node \"crc\" DevicePath \"\"" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.578346 4818 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bz92g_must-gather-2dm2p_9c5f3844-a335-4cb1-b9d6-f22e0ee9d295/copy/0.log" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.579301 4818 generic.go:334] "Generic (PLEG): container finished" podID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerID="79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2" exitCode=143 Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.579362 4818 scope.go:117] "RemoveContainer" containerID="79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.579368 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bz92g/must-gather-2dm2p" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.610555 4818 scope.go:117] "RemoveContainer" containerID="6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.671113 4818 scope.go:117] "RemoveContainer" containerID="79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2" Sep 30 17:34:17 crc kubenswrapper[4818]: E0930 17:34:17.671577 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2\": container with ID starting with 79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2 not found: ID does not exist" containerID="79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.671617 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2"} err="failed to get container status \"79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2\": rpc error: code = NotFound desc = could not find container \"79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2\": container with ID starting with 79cb4cb5cf9ef496cbd0e2efb69187c275a3643b654af71bfbbcccd74df505e2 not found: ID does not exist" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.671650 4818 scope.go:117] "RemoveContainer" containerID="6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc" Sep 30 17:34:17 crc kubenswrapper[4818]: E0930 17:34:17.672010 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc\": container with ID starting with 6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc not found: ID does not exist" containerID="6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc" Sep 30 17:34:17 crc kubenswrapper[4818]: I0930 17:34:17.672062 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc"} err="failed to get container status \"6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc\": rpc error: code = NotFound desc = could not find container \"6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc\": container with ID starting with 6efba672397bd827cb22f2d66c816599b37d8aebad5326d47e87bb9dc43098fc not found: ID does not exist" Sep 30 17:34:18 crc kubenswrapper[4818]: I0930 17:34:18.034088 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" path="/var/lib/kubelet/pods/9c5f3844-a335-4cb1-b9d6-f22e0ee9d295/volumes" Sep 30 17:34:24 crc kubenswrapper[4818]: I0930 17:34:24.574348 4818 scope.go:117] "RemoveContainer" containerID="13cb3a5ce920b756ed9cf3d9e0a29318cccafdc4ed3ca8ec8fd6a6251d2b3256" Sep 30 17:34:24 crc kubenswrapper[4818]: I0930 17:34:24.617717 4818 scope.go:117] "RemoveContainer" containerID="28ad73224de683d5b1daf56faeb3e739184d5088b3f6632e4258dc5843069878" Sep 30 17:34:24 crc kubenswrapper[4818]: I0930 17:34:24.703440 4818 scope.go:117] "RemoveContainer" containerID="817bb379866e07743faebf871207d7a7068346de9d13c89d998266a97553bdf5" Sep 30 17:34:24 crc 
kubenswrapper[4818]: I0930 17:34:24.732141 4818 scope.go:117] "RemoveContainer" containerID="1d035176cf8c9777ef092070627b3a9109e143d9bed407bcb20f16d1dec29bc3" Sep 30 17:34:24 crc kubenswrapper[4818]: I0930 17:34:24.768267 4818 scope.go:117] "RemoveContainer" containerID="7258583eae528003d18118957a309983e8d5ba6f32e766408183c96837cb2398" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.738569 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4mwht"] Sep 30 17:34:50 crc kubenswrapper[4818]: E0930 17:34:50.739414 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="extract-utilities" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739430 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="extract-utilities" Sep 30 17:34:50 crc kubenswrapper[4818]: E0930 17:34:50.739454 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="registry-server" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739461 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="registry-server" Sep 30 17:34:50 crc kubenswrapper[4818]: E0930 17:34:50.739471 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerName="gather" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739478 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerName="gather" Sep 30 17:34:50 crc kubenswrapper[4818]: E0930 17:34:50.739491 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="extract-content" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739497 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="extract-content" Sep 30 17:34:50 crc kubenswrapper[4818]: E0930 17:34:50.739509 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerName="copy" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739514 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerName="copy" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739656 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerName="gather" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739669 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5f3844-a335-4cb1-b9d6-f22e0ee9d295" containerName="copy" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.739683 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ae4d8c-0c5e-408f-884b-91c80e0ef861" containerName="registry-server" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.740837 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.753953 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mwht"] Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.901351 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-catalog-content\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.901433 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-utilities\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:50 crc kubenswrapper[4818]: I0930 17:34:50.901537 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6k68\" (UniqueName: \"kubernetes.io/projected/8fc09609-9001-4ce8-b91e-6ab36da8b955-kube-api-access-l6k68\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.003054 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-utilities\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.003185 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6k68\" (UniqueName: \"kubernetes.io/projected/8fc09609-9001-4ce8-b91e-6ab36da8b955-kube-api-access-l6k68\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.003317 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-catalog-content\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.003768 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-utilities\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.003839 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-catalog-content\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.027749 4818 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-l6k68\" (UniqueName: \"kubernetes.io/projected/8fc09609-9001-4ce8-b91e-6ab36da8b955-kube-api-access-l6k68\") pod \"redhat-marketplace-4mwht\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.056511 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.497783 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mwht"] Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.915755 4818 generic.go:334] "Generic (PLEG): container finished" podID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerID="6517b76f3ce233cf5982cd8066b9e189d85d425f46d0ee45841a8a8fe4732628" exitCode=0 Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.915798 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mwht" event={"ID":"8fc09609-9001-4ce8-b91e-6ab36da8b955","Type":"ContainerDied","Data":"6517b76f3ce233cf5982cd8066b9e189d85d425f46d0ee45841a8a8fe4732628"} Sep 30 17:34:51 crc kubenswrapper[4818]: I0930 17:34:51.915824 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mwht" event={"ID":"8fc09609-9001-4ce8-b91e-6ab36da8b955","Type":"ContainerStarted","Data":"63022b36a5094b682142fc1fd4fb0f25167120c4786d4e31a263bcdbc89ff591"} Sep 30 17:34:52 crc kubenswrapper[4818]: I0930 17:34:52.595672 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:34:52 crc kubenswrapper[4818]: I0930 17:34:52.596121 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:34:52 crc kubenswrapper[4818]: I0930 17:34:52.925834 4818 generic.go:334] "Generic (PLEG): container finished" podID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerID="ff768228b8bc9b247e344d886d8b0f378e98c0ec60848f8f91fd798fc7a140af" exitCode=0 Sep 30 17:34:52 crc kubenswrapper[4818]: I0930 17:34:52.926595 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mwht" event={"ID":"8fc09609-9001-4ce8-b91e-6ab36da8b955","Type":"ContainerDied","Data":"ff768228b8bc9b247e344d886d8b0f378e98c0ec60848f8f91fd798fc7a140af"} Sep 30 17:34:53 crc kubenswrapper[4818]: I0930 17:34:53.936892 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mwht" event={"ID":"8fc09609-9001-4ce8-b91e-6ab36da8b955","Type":"ContainerStarted","Data":"7c176a81760d547c3fc8a87814ac1cf579d14ac2b817c131d789506c8202fb5c"} Sep 30 17:34:53 crc kubenswrapper[4818]: I0930 17:34:53.968415 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4mwht" podStartSLOduration=2.299646 podStartE2EDuration="3.968388178s" podCreationTimestamp="2025-09-30 17:34:50 +0000 UTC" firstStartedPulling="2025-09-30 17:34:51.91830579 +0000 
UTC m=+2138.672577606" lastFinishedPulling="2025-09-30 17:34:53.587047938 +0000 UTC m=+2140.341319784" observedRunningTime="2025-09-30 17:34:53.961363447 +0000 UTC m=+2140.715635283" watchObservedRunningTime="2025-09-30 17:34:53.968388178 +0000 UTC m=+2140.722660004" Sep 30 17:35:01 crc kubenswrapper[4818]: I0930 17:35:01.057127 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:35:01 crc kubenswrapper[4818]: I0930 17:35:01.057857 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:35:01 crc kubenswrapper[4818]: I0930 17:35:01.138414 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.073247 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.548577 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9mpph"] Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.550331 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.576374 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9mpph"] Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.728777 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbv9t\" (UniqueName: \"kubernetes.io/projected/c3bf4ef9-f620-4725-8298-71ab6748dad8-kube-api-access-hbv9t\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.728840 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-utilities\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.728907 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-catalog-content\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.830113 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbv9t\" (UniqueName: \"kubernetes.io/projected/c3bf4ef9-f620-4725-8298-71ab6748dad8-kube-api-access-hbv9t\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.830188 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-utilities\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " 
pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.830263 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-catalog-content\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.830943 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-utilities\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.830954 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-catalog-content\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.864679 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbv9t\" (UniqueName: \"kubernetes.io/projected/c3bf4ef9-f620-4725-8298-71ab6748dad8-kube-api-access-hbv9t\") pod \"certified-operators-9mpph\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:02 crc kubenswrapper[4818]: I0930 17:35:02.874475 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:03 crc kubenswrapper[4818]: I0930 17:35:03.172912 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9mpph"] Sep 30 17:35:04 crc kubenswrapper[4818]: I0930 17:35:04.049250 4818 generic.go:334] "Generic (PLEG): container finished" podID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerID="8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3" exitCode=0 Sep 30 17:35:04 crc kubenswrapper[4818]: I0930 17:35:04.049558 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mpph" event={"ID":"c3bf4ef9-f620-4725-8298-71ab6748dad8","Type":"ContainerDied","Data":"8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3"} Sep 30 17:35:04 crc kubenswrapper[4818]: I0930 17:35:04.049590 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mpph" event={"ID":"c3bf4ef9-f620-4725-8298-71ab6748dad8","Type":"ContainerStarted","Data":"1a84e6cf4f04035dd04e7496487d1a8066c58476c08dec7f2cc7213095654d29"} Sep 30 17:35:04 crc kubenswrapper[4818]: I0930 17:35:04.051248 4818 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 30 17:35:05 crc kubenswrapper[4818]: I0930 17:35:05.930611 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mwht"] Sep 30 17:35:05 crc kubenswrapper[4818]: I0930 17:35:05.931609 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4mwht" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerName="registry-server" 
containerID="cri-o://7c176a81760d547c3fc8a87814ac1cf579d14ac2b817c131d789506c8202fb5c" gracePeriod=2 Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.067148 4818 generic.go:334] "Generic (PLEG): container finished" podID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerID="7c176a81760d547c3fc8a87814ac1cf579d14ac2b817c131d789506c8202fb5c" exitCode=0 Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.067227 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mwht" event={"ID":"8fc09609-9001-4ce8-b91e-6ab36da8b955","Type":"ContainerDied","Data":"7c176a81760d547c3fc8a87814ac1cf579d14ac2b817c131d789506c8202fb5c"} Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.077195 4818 generic.go:334] "Generic (PLEG): container finished" podID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerID="f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8" exitCode=0 Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.077251 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mpph" event={"ID":"c3bf4ef9-f620-4725-8298-71ab6748dad8","Type":"ContainerDied","Data":"f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8"} Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.365982 4818 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.494821 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6k68\" (UniqueName: \"kubernetes.io/projected/8fc09609-9001-4ce8-b91e-6ab36da8b955-kube-api-access-l6k68\") pod \"8fc09609-9001-4ce8-b91e-6ab36da8b955\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.495181 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-catalog-content\") pod \"8fc09609-9001-4ce8-b91e-6ab36da8b955\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.495328 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-utilities\") pod \"8fc09609-9001-4ce8-b91e-6ab36da8b955\" (UID: \"8fc09609-9001-4ce8-b91e-6ab36da8b955\") " Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.496105 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-utilities" (OuterVolumeSpecName: "utilities") pod "8fc09609-9001-4ce8-b91e-6ab36da8b955" (UID: "8fc09609-9001-4ce8-b91e-6ab36da8b955"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.516836 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fc09609-9001-4ce8-b91e-6ab36da8b955-kube-api-access-l6k68" (OuterVolumeSpecName: "kube-api-access-l6k68") pod "8fc09609-9001-4ce8-b91e-6ab36da8b955" (UID: "8fc09609-9001-4ce8-b91e-6ab36da8b955"). InnerVolumeSpecName "kube-api-access-l6k68". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.520905 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8fc09609-9001-4ce8-b91e-6ab36da8b955" (UID: "8fc09609-9001-4ce8-b91e-6ab36da8b955"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.596995 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6k68\" (UniqueName: \"kubernetes.io/projected/8fc09609-9001-4ce8-b91e-6ab36da8b955-kube-api-access-l6k68\") on node \"crc\" DevicePath \"\"" Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.597039 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:35:06 crc kubenswrapper[4818]: I0930 17:35:06.597053 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fc09609-9001-4ce8-b91e-6ab36da8b955-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.088954 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mpph" event={"ID":"c3bf4ef9-f620-4725-8298-71ab6748dad8","Type":"ContainerStarted","Data":"0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9"} Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.091643 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mwht" event={"ID":"8fc09609-9001-4ce8-b91e-6ab36da8b955","Type":"ContainerDied","Data":"63022b36a5094b682142fc1fd4fb0f25167120c4786d4e31a263bcdbc89ff591"} Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.091678 4818 scope.go:117] "RemoveContainer" containerID="7c176a81760d547c3fc8a87814ac1cf579d14ac2b817c131d789506c8202fb5c" Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.091781 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mwht" Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.111914 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9mpph" podStartSLOduration=2.648915815 podStartE2EDuration="5.111893497s" podCreationTimestamp="2025-09-30 17:35:02 +0000 UTC" firstStartedPulling="2025-09-30 17:35:04.050989428 +0000 UTC m=+2150.805261254" lastFinishedPulling="2025-09-30 17:35:06.51396713 +0000 UTC m=+2153.268238936" observedRunningTime="2025-09-30 17:35:07.106913158 +0000 UTC m=+2153.861185034" watchObservedRunningTime="2025-09-30 17:35:07.111893497 +0000 UTC m=+2153.866165303" Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.117539 4818 scope.go:117] "RemoveContainer" containerID="ff768228b8bc9b247e344d886d8b0f378e98c0ec60848f8f91fd798fc7a140af" Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.140813 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mwht"] Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.145203 4818 scope.go:117] "RemoveContainer" containerID="6517b76f3ce233cf5982cd8066b9e189d85d425f46d0ee45841a8a8fe4732628" Sep 30 17:35:07 crc kubenswrapper[4818]: I0930 17:35:07.148623 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mwht"] Sep 30 17:35:08 crc kubenswrapper[4818]: I0930 17:35:08.031109 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" path="/var/lib/kubelet/pods/8fc09609-9001-4ce8-b91e-6ab36da8b955/volumes" Sep 30 17:35:12 crc kubenswrapper[4818]: I0930 17:35:12.874944 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:12 crc kubenswrapper[4818]: I0930 17:35:12.875563 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:12 crc kubenswrapper[4818]: I0930 17:35:12.923511 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:13 crc kubenswrapper[4818]: I0930 17:35:13.214783 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:15 crc kubenswrapper[4818]: I0930 17:35:15.328964 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9mpph"] Sep 30 17:35:15 crc kubenswrapper[4818]: I0930 17:35:15.330432 4818 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9mpph" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="registry-server" containerID="cri-o://0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9" gracePeriod=2 Sep 30 17:35:15 crc kubenswrapper[4818]: I0930 17:35:15.899498 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.062188 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-utilities\") pod \"c3bf4ef9-f620-4725-8298-71ab6748dad8\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.062287 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-catalog-content\") pod \"c3bf4ef9-f620-4725-8298-71ab6748dad8\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.062436 4818 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbv9t\" (UniqueName: \"kubernetes.io/projected/c3bf4ef9-f620-4725-8298-71ab6748dad8-kube-api-access-hbv9t\") pod \"c3bf4ef9-f620-4725-8298-71ab6748dad8\" (UID: \"c3bf4ef9-f620-4725-8298-71ab6748dad8\") " Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.064378 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-utilities" (OuterVolumeSpecName: "utilities") pod "c3bf4ef9-f620-4725-8298-71ab6748dad8" (UID: "c3bf4ef9-f620-4725-8298-71ab6748dad8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.069746 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3bf4ef9-f620-4725-8298-71ab6748dad8-kube-api-access-hbv9t" (OuterVolumeSpecName: "kube-api-access-hbv9t") pod "c3bf4ef9-f620-4725-8298-71ab6748dad8" (UID: "c3bf4ef9-f620-4725-8298-71ab6748dad8"). InnerVolumeSpecName "kube-api-access-hbv9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.123868 4818 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c3bf4ef9-f620-4725-8298-71ab6748dad8" (UID: "c3bf4ef9-f620-4725-8298-71ab6748dad8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.164197 4818 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbv9t\" (UniqueName: \"kubernetes.io/projected/c3bf4ef9-f620-4725-8298-71ab6748dad8-kube-api-access-hbv9t\") on node \"crc\" DevicePath \"\"" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.164234 4818 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-utilities\") on node \"crc\" DevicePath \"\"" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.164249 4818 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3bf4ef9-f620-4725-8298-71ab6748dad8-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.166820 4818 generic.go:334] "Generic (PLEG): container finished" podID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerID="0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9" exitCode=0 Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.166869 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mpph" event={"ID":"c3bf4ef9-f620-4725-8298-71ab6748dad8","Type":"ContainerDied","Data":"0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9"} Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.166894 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mpph" event={"ID":"c3bf4ef9-f620-4725-8298-71ab6748dad8","Type":"ContainerDied","Data":"1a84e6cf4f04035dd04e7496487d1a8066c58476c08dec7f2cc7213095654d29"} Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.166911 4818 scope.go:117] "RemoveContainer" containerID="0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.167060 4818 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9mpph" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.198830 4818 scope.go:117] "RemoveContainer" containerID="f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.201645 4818 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9mpph"] Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.206619 4818 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9mpph"] Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.227889 4818 scope.go:117] "RemoveContainer" containerID="8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.269272 4818 scope.go:117] "RemoveContainer" containerID="0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9" Sep 30 17:35:16 crc kubenswrapper[4818]: E0930 17:35:16.269778 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9\": container with ID starting with 0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9 not found: ID does not exist" containerID="0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.269822 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9"} err="failed to get container status \"0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9\": rpc error: code = NotFound desc = could not find container \"0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9\": container with ID starting with 0fb36a4c40515d5ae19ce7f937189f5fd56c29310b2f5d520b5b211e3b97f2f9 not found: ID does not exist" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.269852 4818 scope.go:117] "RemoveContainer" containerID="f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8" Sep 30 17:35:16 crc kubenswrapper[4818]: E0930 17:35:16.270263 4818 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8\": container with ID starting with f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8 not found: ID does not exist" containerID="f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.270296 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8"} err="failed to get container status \"f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8\": rpc error: code = NotFound desc = could not find container \"f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8\": container with ID starting with f939208b6e563de82fa39efada5b91e395eb59f77e8319ee5e28feb9225ad9f8 not found: ID does not exist" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.270317 4818 scope.go:117] "RemoveContainer" containerID="8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3" Sep 30 17:35:16 crc kubenswrapper[4818]: E0930 17:35:16.270562 4818 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3\": container with ID starting with 8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3 not found: ID does not exist" containerID="8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3" Sep 30 17:35:16 crc kubenswrapper[4818]: I0930 17:35:16.270585 4818 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3"} err="failed to get container status \"8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3\": rpc error: code = NotFound desc = could not find container \"8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3\": container with ID starting with 8f477d5d8b8ecffde5d83966e10be63788a9917242b9e5bf3998e94c4896d8e3 not found: ID does not exist" Sep 30 17:35:18 crc kubenswrapper[4818]: I0930 17:35:18.031566 4818 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" path="/var/lib/kubelet/pods/c3bf4ef9-f620-4725-8298-71ab6748dad8/volumes" Sep 30 17:35:22 crc kubenswrapper[4818]: I0930 17:35:22.596216 4818 patch_prober.go:28] interesting pod/machine-config-daemon-vc6ss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 30 17:35:22 crc kubenswrapper[4818]: I0930 17:35:22.596594 4818 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vc6ss" podUID="5e908152-dcb2-4b41-974d-26b03ae0254b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 30 17:35:24 crc kubenswrapper[4818]: I0930 17:35:24.910393 4818 scope.go:117] "RemoveContainer" containerID="a1bc811334aed386d9af54366006dbcb983c45143e6154f200489e70256f2bbe" Sep 30 17:35:24 crc kubenswrapper[4818]: I0930 17:35:24.936202 4818 scope.go:117] "RemoveContainer" containerID="0f6a05764849702bd462c38b716c0e14ab93d2934472600e1de818f1556d5489" Sep 30 17:35:24 crc kubenswrapper[4818]: I0930 17:35:24.979779 4818 scope.go:117] "RemoveContainer" containerID="be5102971e9c13036fbca25c7228887b3ec7872cdb3a02253f1fa5e52ddb4d23" Sep 30 17:35:25 crc kubenswrapper[4818]: I0930 17:35:25.048688 4818 scope.go:117] "RemoveContainer" containerID="3a324f67c5c01ed018e09976ac385b7290a02a911ab894955ecd82c36a2af6ac" Sep 30 17:35:25 crc kubenswrapper[4818]: I0930 17:35:25.064808 4818 scope.go:117] "RemoveContainer" containerID="52c8e418537882e7aca3ca5aa72441aee1daa510c514987b06fd07750818a59f" Sep 30 17:35:25 crc kubenswrapper[4818]: I0930 17:35:25.088858 4818 scope.go:117] "RemoveContainer" containerID="fb5da23b6d8c9e19bb0b41957ac84e4da26333e056b66e9ecc850035f3468bbc" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.544293 4818 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-478b5"] Sep 30 17:35:26 crc kubenswrapper[4818]: E0930 17:35:26.544832 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerName="registry-server" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.544844 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" 
containerName="registry-server" Sep 30 17:35:26 crc kubenswrapper[4818]: E0930 17:35:26.544865 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerName="extract-content" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.544872 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerName="extract-content" Sep 30 17:35:26 crc kubenswrapper[4818]: E0930 17:35:26.544882 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerName="extract-utilities" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.544888 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerName="extract-utilities" Sep 30 17:35:26 crc kubenswrapper[4818]: E0930 17:35:26.544914 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="extract-content" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.544936 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="extract-content" Sep 30 17:35:26 crc kubenswrapper[4818]: E0930 17:35:26.544950 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="registry-server" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.544956 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="registry-server" Sep 30 17:35:26 crc kubenswrapper[4818]: E0930 17:35:26.544965 4818 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="extract-utilities" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.544971 4818 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="extract-utilities" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.545100 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3bf4ef9-f620-4725-8298-71ab6748dad8" containerName="registry-server" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.545121 4818 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fc09609-9001-4ce8-b91e-6ab36da8b955" containerName="registry-server" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.546139 4818 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.567914 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-478b5"] Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.630043 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wht78\" (UniqueName: \"kubernetes.io/projected/a63105f1-3191-4451-9746-d2024ad89bd8-kube-api-access-wht78\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.630129 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a63105f1-3191-4451-9746-d2024ad89bd8-catalog-content\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.630506 4818 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a63105f1-3191-4451-9746-d2024ad89bd8-utilities\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.732093 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a63105f1-3191-4451-9746-d2024ad89bd8-utilities\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.732159 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wht78\" (UniqueName: \"kubernetes.io/projected/a63105f1-3191-4451-9746-d2024ad89bd8-kube-api-access-wht78\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.732268 4818 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a63105f1-3191-4451-9746-d2024ad89bd8-catalog-content\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.732653 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a63105f1-3191-4451-9746-d2024ad89bd8-utilities\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.732708 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a63105f1-3191-4451-9746-d2024ad89bd8-catalog-content\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.761961 4818 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wht78\" (UniqueName: \"kubernetes.io/projected/a63105f1-3191-4451-9746-d2024ad89bd8-kube-api-access-wht78\") pod \"redhat-operators-478b5\" (UID: \"a63105f1-3191-4451-9746-d2024ad89bd8\") " pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:26 crc kubenswrapper[4818]: I0930 17:35:26.882104 4818 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:27 crc kubenswrapper[4818]: I0930 17:35:27.328254 4818 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-478b5"] Sep 30 17:35:28 crc kubenswrapper[4818]: I0930 17:35:28.276373 4818 generic.go:334] "Generic (PLEG): container finished" podID="a63105f1-3191-4451-9746-d2024ad89bd8" containerID="7eab2bdc9150f098542888c656f6e225defe2809cfb9380114f7eb7dfd120dbe" exitCode=0 Sep 30 17:35:28 crc kubenswrapper[4818]: I0930 17:35:28.276460 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-478b5" event={"ID":"a63105f1-3191-4451-9746-d2024ad89bd8","Type":"ContainerDied","Data":"7eab2bdc9150f098542888c656f6e225defe2809cfb9380114f7eb7dfd120dbe"} Sep 30 17:35:28 crc kubenswrapper[4818]: I0930 17:35:28.276627 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-478b5" event={"ID":"a63105f1-3191-4451-9746-d2024ad89bd8","Type":"ContainerStarted","Data":"e3b6f09c31a79bdec7264bf80fcf2e897b0faa4426d982ce6f756aa9cc7b0f96"} Sep 30 17:35:30 crc kubenswrapper[4818]: I0930 17:35:30.298858 4818 generic.go:334] "Generic (PLEG): container finished" podID="a63105f1-3191-4451-9746-d2024ad89bd8" containerID="ef6e1c3f513962a141971a386ace26117e9f2fd35da14dbce6596bb4d710aef4" exitCode=0 Sep 30 17:35:30 crc kubenswrapper[4818]: I0930 17:35:30.298964 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-478b5" event={"ID":"a63105f1-3191-4451-9746-d2024ad89bd8","Type":"ContainerDied","Data":"ef6e1c3f513962a141971a386ace26117e9f2fd35da14dbce6596bb4d710aef4"} Sep 30 17:35:31 crc kubenswrapper[4818]: I0930 17:35:31.309142 4818 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-478b5" event={"ID":"a63105f1-3191-4451-9746-d2024ad89bd8","Type":"ContainerStarted","Data":"6070ad79ec5419919b5ef9d5c0f3f9e5a807035a257cca1f4136b0750937dbfa"} Sep 30 17:35:31 crc kubenswrapper[4818]: I0930 17:35:31.333272 4818 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-478b5" podStartSLOduration=2.871937585 podStartE2EDuration="5.333250325s" podCreationTimestamp="2025-09-30 17:35:26 +0000 UTC" firstStartedPulling="2025-09-30 17:35:28.279402009 +0000 UTC m=+2175.033673865" lastFinishedPulling="2025-09-30 17:35:30.740714789 +0000 UTC m=+2177.494986605" observedRunningTime="2025-09-30 17:35:31.330878054 +0000 UTC m=+2178.085149870" watchObservedRunningTime="2025-09-30 17:35:31.333250325 +0000 UTC m=+2178.087522141" Sep 30 17:35:36 crc kubenswrapper[4818]: I0930 17:35:36.883168 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:36 crc kubenswrapper[4818]: I0930 17:35:36.883716 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:36 crc kubenswrapper[4818]: I0930 17:35:36.944285 4818 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-478b5" Sep 30 17:35:37 crc kubenswrapper[4818]: I0930 17:35:37.443463 4818 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-478b5" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515067012364024451 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015067012364017366 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015067005701016506 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015067005702015457 5ustar corecore